From cfbolz at codespeak.net Wed Sep 1 17:50:59 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 1 Sep 2010 17:50:59 +0200 (CEST) Subject: [pypy-svn] r76815 - pypy/branch/better-map-instances/pypy/interpreter Message-ID: <20100901155059.70D26282BD6@codespeak.net> Author: cfbolz Date: Wed Sep 1 17:50:57 2010 New Revision: 76815 Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py Log: (arigo, cfbolz): be a bit more object-oriented and don't just copy the code from the base user_setup Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Wed Sep 1 17:50:57 2010 @@ -271,6 +271,9 @@ wantdict = False if wantdict: + base_user_setup = supercls.user_setup.im_func + if "user_setup" in body: + base_user_setup = body["user_setup"] class Proto(object): def getdict(self): return self.w__dict__ @@ -279,11 +282,9 @@ self.w__dict__ = check_new_dictionary(space, w_dict) def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype self.w__dict__ = space.newdict( instance=True, classofinstance=w_subtype) - self.user_setup_slots(w_subtype.nslots) + base_user_setup(self, space, w_subtype) def setclass(self, space, w_subtype): # only used by descr_set___class__ From cfbolz at codespeak.net Wed Sep 1 17:55:18 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 1 Sep 2010 17:55:18 +0200 (CEST) Subject: [pypy-svn] r76816 - in pypy/branch/better-map-instances/pypy/module/__builtin__: . test Message-ID: <20100901155518.7CE9B282BD6@codespeak.net> Author: cfbolz Date: Wed Sep 1 17:55:16 2010 New Revision: 76816 Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Log: (arigo, cfbolz): please don't subclass old-style InstanceType Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Wed Sep 1 17:55:16 2010 @@ -757,6 +757,7 @@ unwrap_spec=['self', ObjSpace]), **rawdict ) +W_InstanceObject.typedef.acceptable_as_base_class = False class W_InstanceObjectWithDel(W_InstanceObject): def __del__(self): Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Wed Sep 1 17:55:16 2010 @@ -767,6 +767,17 @@ finally: warnings.simplefilter('default', RuntimeWarning) + def test_cant_subclass_instance(self): + class A: + pass + try: + class B(type(A())): + pass + except TypeError: + pass + else: + assert 0, "should have raised" + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withsharingdict": True}) From hakanardo at codespeak.net Wed Sep 1 18:00:22 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Wed, 1 Sep 2010 18:00:22 +0200 (CEST) Subject: [pypy-svn] 
r76817 - pypy/branch/jit-bounds/pypy/module/pypyjit/test Message-ID: <20100901160022.6555C282BD6@codespeak.net> Author: hakanardo Date: Wed Sep 1 18:00:20 2010 New Revision: 76817 Modified: pypy/branch/jit-bounds/pypy/module/pypyjit/test/randomized.py Log: save failures to file Modified: pypy/branch/jit-bounds/pypy/module/pypyjit/test/randomized.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/pypyjit/test/randomized.py (original) +++ pypy/branch/jit-bounds/pypy/module/pypyjit/test/randomized.py Wed Sep 1 18:00:20 2010 @@ -101,6 +101,7 @@ r.close() return res, err +fcnt = 0 while True: code = ''' try: # make the file runnable by CPython @@ -116,9 +117,14 @@ r1,e1 = run('/usr/bin/python', code) r2,e2 = run('../../../translator/goal/pypy-c', code) if r1 != r2: - print - print '******************** FAILED ******************' - print code - print 'cpython: ', r1, e1 - print 'pypy: ', r2, e2 + rapport = '******************** FAILED ******************\n' + \ + code + "\n" + \ + 'cpython: %s %s\n' % (r1, e1) + \ + 'pypy: %s %s\n' % (r2, e2) + fcnt += 1 + f = open('failures/%d' % fcnt, "w") + f.write(rapport) + f.close() + print rapport + From hakanardo at codespeak.net Wed Sep 1 18:02:02 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Wed, 1 Sep 2010 18:02:02 +0200 (CEST) Subject: [pypy-svn] r76818 - in pypy/branch/jit-bounds: . lib-python/modified-2.5.2/test pypy/config pypy/interpreter pypy/interpreter/test pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/tool pypy/jit/tool/test pypy/module/_socket pypy/module/_socket/test pypy/module/array/benchmark pypy/module/array/test pypy/rlib pypy/rpython/memory/gc pypy/tool pypy/translator/c/test pypy/translator/goal pypy/translator/goal/test2 pypy/translator/platform pypy/translator/platform/test Message-ID: <20100901160202.E3F51282BD6@codespeak.net> Author: hakanardo Date: Wed Sep 1 18:02:00 2010 New Revision: 76818 Removed: pypy/branch/jit-bounds/lib-python/modified-2.5.2/test/test_re.py Modified: pypy/branch/jit-bounds/ (props changed) pypy/branch/jit-bounds/pypy/config/translationoption.py pypy/branch/jit-bounds/pypy/interpreter/generator.py pypy/branch/jit-bounds/pypy/interpreter/pyframe.py pypy/branch/jit-bounds/pypy/interpreter/test/test_generator.py pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_runner.py pypy/branch/jit-bounds/pypy/jit/tool/test/test_traceviewer.py pypy/branch/jit-bounds/pypy/jit/tool/traceviewer.py pypy/branch/jit-bounds/pypy/module/_socket/interp_socket.py pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py pypy/branch/jit-bounds/pypy/module/array/benchmark/Makefile (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimg.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/loop.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sum.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/jit-bounds/pypy/rlib/rlocale.py 
pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py pypy/branch/jit-bounds/pypy/tool/runsubprocess.py pypy/branch/jit-bounds/pypy/translator/c/test/test_standalone.py pypy/branch/jit-bounds/pypy/translator/goal/app_main.py pypy/branch/jit-bounds/pypy/translator/goal/test2/test_app_main.py pypy/branch/jit-bounds/pypy/translator/goal/translate.py pypy/branch/jit-bounds/pypy/translator/platform/__init__.py pypy/branch/jit-bounds/pypy/translator/platform/test/test_platform.py Log: svn merge -r76716:76816 svn+ssh://hakanardo at codespeak.net/svn/pypy/trunk Modified: pypy/branch/jit-bounds/pypy/config/translationoption.py ============================================================================== --- pypy/branch/jit-bounds/pypy/config/translationoption.py (original) +++ pypy/branch/jit-bounds/pypy/config/translationoption.py Wed Sep 1 18:02:00 2010 @@ -342,9 +342,12 @@ 'jit': 'hybrid extraopts jit', } -# For now, 64-bit JIT requires boehm -if IS_64_BITS: - OPT_TABLE['jit'] = OPT_TABLE['jit'].replace('hybrid', 'boehm') +def final_check_config(config): + # For now, 64-bit JIT requires boehm. You have to say it explicitly + # with --gc=boehm, so that you don't get boehm by mistake. + if IS_64_BITS: + if config.translation.jit and config.translation.gc != 'boehm': + raise ConfigError("for now, 64-bit JIT requires --gc=boehm") def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. Modified: pypy/branch/jit-bounds/pypy/interpreter/generator.py ============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/generator.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/generator.py Wed Sep 1 18:02:00 2010 @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped -from pypy.rlib.rarithmetic import intmask from pypy.rlib import jit from pypy.interpreter.pyopcode import LoopBlock @@ -37,13 +36,17 @@ return next yielded value or raise StopIteration.""" return self.send_ex(w_arg) - def send_ex(self, w_arg, exc=False): + def send_ex(self, w_arg, operr=None): space = self.space if self.running: raise OperationError(space.w_ValueError, space.wrap('generator already executing')) if self.frame.frame_finished_execution: - raise OperationError(space.w_StopIteration, space.w_None) + # xxx a bit ad-hoc, but we don't want to go inside + # execute_generator_frame() if the frame is actually finished + if operr is None: + operr = OperationError(space.w_StopIteration, space.w_None) + raise operr # XXX it's not clear that last_instr should be promoted at all # but as long as it is necessary for call_assembler, let's do it early last_instr = jit.hint(self.frame.last_instr, promote=True) @@ -57,7 +60,7 @@ self.running = True try: try: - w_result = self.frame.execute_generator_frame(w_arg, exc) + w_result = self.frame.execute_generator_frame(w_arg, operr) except OperationError: # errors finish a frame self.frame.frame_finished_execution = True @@ -89,12 +92,7 @@ operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) - - ec = space.getexecutioncontext() - next_instr = self.frame.handle_operation_error(ec, operr) - self.frame.last_instr = intmask(next_instr - 1) - - return self.send_ex(space.w_None, True) + return self.send_ex(space.w_None, operr) def descr_next(self): """next() -> the next value, or raise StopIteration""" Modified: pypy/branch/jit-bounds/pypy/interpreter/pyframe.py 
============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/pyframe.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/pyframe.py Wed Sep 1 18:02:00 2010 @@ -10,6 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized +from pypy.rlib.rarithmetic import intmask from pypy.rlib import jit, rstack from pypy.tool import stdlib_opcode @@ -125,8 +126,12 @@ else: return self.execute_frame() - def execute_generator_frame(self, w_inputvalue, ex=False): - if self.last_instr != -1 and not ex: + def execute_generator_frame(self, w_inputvalue, operr=None): + if operr is not None: + ec = self.space.getexecutioncontext() + next_instr = self.handle_operation_error(ec, operr) + self.last_instr = intmask(next_instr - 1) + elif self.last_instr != -1: self.pushvalue(w_inputvalue) return self.execute_frame() Modified: pypy/branch/jit-bounds/pypy/interpreter/test/test_generator.py ============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/test/test_generator.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/test/test_generator.py Wed Sep 1 18:02:00 2010 @@ -126,6 +126,25 @@ raises(ValueError, g.throw, ValueError) assert g.gi_frame is None + def test_throw_bug(self): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + + def test_throw_on_finished_generator(self): + def f(): + yield 1 + g = f() + res = g.next() + assert res == 1 + raises(StopIteration, g.next) + raises(NameError, g.throw, NameError) + def test_close(self): def f(): yield 1 Modified: pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py Wed Sep 1 18:02:00 2010 @@ -785,6 +785,20 @@ 'float', descr=arraydescr) assert r.value == 4.5 + # For platforms where sizeof(INT) != sizeof(Signed) (ie, x86-64) + a_box, A = self.alloc_array_of(rffi.INT, 342) + arraydescr = self.cpu.arraydescrof(A) + assert not arraydescr.is_array_of_pointers() + r = self.execute_operation(rop.ARRAYLEN_GC, [a_box], + 'int', descr=arraydescr) + assert r.value == 342 + r = self.execute_operation(rop.SETARRAYITEM_GC, [a_box, BoxInt(310), + BoxInt(7441)], + 'void', descr=arraydescr) + assert r is None + r = self.execute_operation(rop.GETARRAYITEM_GC, [a_box, BoxInt(310)], + 'int', descr=arraydescr) + assert r.value == 7441 def test_string_basic(self): s_box = self.alloc_string("hello\xfe") Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py Wed Sep 1 18:02:00 2010 @@ -1046,7 +1046,10 @@ self.mc.MOVZX8(resloc, source_addr) elif size == 2: self.mc.MOVZX16(resloc, source_addr) - elif size == WORD: + elif size == 4: + # MOV32 is zero-extending on 64-bit, so this is okay + self.mc.MOV32(resloc, source_addr) + elif IS_X86_64 and size == 8: self.mc.MOV(resloc, source_addr) else: raise NotImplementedError("getfield size = %d" % size) @@ -1059,19 +1062,18 @@ base_loc, ofs_loc, scale, ofs = arglocs assert 
isinstance(ofs, ImmedLoc) assert isinstance(scale, ImmedLoc) + src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale.value) if op.result.type == FLOAT: - self.mc.MOVSD(resloc, addr_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOVSD(resloc, src_addr) else: if scale.value == 0: - self.mc.MOVZX8(resloc, addr_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOVZX8(resloc, src_addr) elif scale.value == 1: - self.mc.MOVZX16(resloc, addr_add(base_loc, ofs_loc, ofs.value, - scale.value)) - elif (1 << scale.value) == WORD: - self.mc.MOV(resloc, addr_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOVZX16(resloc, src_addr) + elif scale.value == 2: + self.mc.MOV32(resloc, src_addr) + elif IS_X86_64 and scale.value == 3: + self.mc.MOV(resloc, src_addr) else: print "[asmgen]getarrayitem unsupported size: %d" % scale.value raise NotImplementedError() @@ -1086,8 +1088,10 @@ dest_addr = AddressLoc(base_loc, ofs_loc) if isinstance(value_loc, RegLoc) and value_loc.is_xmm: self.mc.MOVSD(dest_addr, value_loc) - elif size == WORD: + elif IS_X86_64 and size == 8: self.mc.MOV(dest_addr, value_loc) + elif size == 4: + self.mc.MOV32(dest_addr, value_loc) elif size == 2: self.mc.MOV16(dest_addr, value_loc) elif size == 1: @@ -1104,8 +1108,10 @@ if op.args[2].type == FLOAT: self.mc.MOVSD(dest_addr, value_loc) else: - if (1 << scale_loc.value) == WORD: + if IS_X86_64 and scale_loc.value == 3: self.mc.MOV(dest_addr, value_loc) + elif scale_loc.value == 2: + self.mc.MOV32(dest_addr, value_loc) elif scale_loc.value == 1: self.mc.MOV16(dest_addr, value_loc) elif scale_loc.value == 0: Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_runner.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_runner.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_runner.py Wed Sep 1 18:02:00 2010 @@ -193,6 +193,7 @@ def test_getfield_setfield(self): TP = lltype.GcStruct('x', ('s', lltype.Signed), + ('i', rffi.INT), ('f', lltype.Float), ('u', rffi.USHORT), ('c1', lltype.Char), @@ -201,6 +202,7 @@ res = self.execute_operation(rop.NEW, [], 'ref', self.cpu.sizeof(TP)) ofs_s = self.cpu.fielddescrof(TP, 's') + ofs_i = self.cpu.fielddescrof(TP, 'i') #ofs_f = self.cpu.fielddescrof(TP, 'f') ofs_u = self.cpu.fielddescrof(TP, 'u') ofsc1 = self.cpu.fielddescrof(TP, 'c1') @@ -218,6 +220,11 @@ ofs_s) s = self.execute_operation(rop.GETFIELD_GC, [res], 'int', ofs_s) assert s.value == 3 + + self.execute_operation(rop.SETFIELD_GC, [res, BoxInt(1234)], 'void', ofs_i) + i = self.execute_operation(rop.GETFIELD_GC, [res], 'int', ofs_i) + assert i.value == 1234 + #u = self.execute_operation(rop.GETFIELD_GC, [res, ofs_u], 'int') #assert u.value == 5 self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(1)], 'void', Modified: pypy/branch/jit-bounds/pypy/jit/tool/test/test_traceviewer.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/tool/test/test_traceviewer.py (original) +++ pypy/branch/jit-bounds/pypy/jit/tool/test/test_traceviewer.py Wed Sep 1 18:02:00 2010 @@ -52,10 +52,10 @@ def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP')", None)] - postprocess(real_loops, real_loops[:]) + postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") def test_load_actual(self): fname = 
py.path.local(__file__).join('..', 'data.log.bz2') - main(str(fname), view=False) + main(str(fname), False, view=False) # assert did not explode Modified: pypy/branch/jit-bounds/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/jit-bounds/pypy/jit/tool/traceviewer.py Wed Sep 1 18:02:00 2010 @@ -1,11 +1,12 @@ #!/usr/bin/env python -""" Usage: traceviewer.py loopfile +""" Usage: traceviewer.py [--use-threshold] loopfile """ import optparse import sys import re import math +import py import autopath from pypy.translator.tool.graphpage import GraphPage @@ -40,13 +41,13 @@ self.source = dotgen.generate(target=None) class Page(GraphPage): - def compute(self, graphs): + def compute(self, graphs, counts): dotgen = DotGen('trace') self.loops = graphs self.links = {} self.cache = {} for loop in self.loops: - loop.generate(dotgen) + loop.generate(dotgen, counts) loop.getlinks(self.links) self.cache["loop" + str(loop.no)] = loop self.source = dotgen.generate(target=None) @@ -71,9 +72,14 @@ def getlinks(self, links): links[self.linksource] = self.name() - def generate(self, dotgen): + def generate(self, dotgen, counts): + val = counts.get(self.key, 0) + if val > counts.threshold: + fillcolor = get_gradient_color(self.ratio) + else: + fillcolor = "white" dotgen.emit_node(self.name(), label=self.header, - shape='box', fillcolor=get_gradient_color(self.ratio)) + shape='box', fillcolor=fillcolor) def get_content(self): return self._content @@ -113,11 +119,11 @@ self.target = target BasicBlock.__init__(self, content) - def postprocess(self, loops, memo): - postprocess_loop(self.target, loops, memo) + def postprocess(self, loops, memo, counts): + postprocess_loop(self.target, loops, memo, counts) - def generate(self, dotgen): - BasicBlock.generate(self, dotgen) + def generate(self, dotgen, counts): + BasicBlock.generate(self, dotgen, counts) if self.target is not None: dotgen.emit_edge(self.name(), self.target.name()) @@ -127,12 +133,12 @@ self.right = right BasicBlock.__init__(self, content) - def postprocess(self, loops, memo): - postprocess_loop(self.left, loops, memo) - postprocess_loop(self.right, loops, memo) + def postprocess(self, loops, memo, counts): + postprocess_loop(self.left, loops, memo, counts) + postprocess_loop(self.right, loops, memo, counts) - def generate(self, dotgen): - BasicBlock.generate(self, dotgen) + def generate(self, dotgen, counts): + BasicBlock.generate(self, dotgen, counts) dotgen.emit_edge(self.name(), self.left.name()) dotgen.emit_edge(self.name(), self.right.name()) @@ -176,13 +182,11 @@ real_loops = [] counter = 1 bar = progressbar.ProgressBar(color='blue') - single_percent = len(loops) / 100 allloops = [] - for i, loop in enumerate(loops): + for i, loop in enumerate(loops): if i > MAX_LOOPS: return real_loops, allloops - if single_percent and i % single_percent == 0: - bar.render(i / single_percent) + bar.render((i * 100) / len(loops)) firstline = loop[:loop.find("\n")] m = re.match('# Loop (\d+)', firstline) if m: @@ -202,17 +206,19 @@ counter += loop.count("\n") + 2 return real_loops, allloops -def postprocess_loop(loop, loops, memo): +def postprocess_loop(loop, loops, memo, counts): if loop in memo: return memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\(' (.*?)'", loop.content) + m = re.search("debug_merge_point\('( (.*?))'", loop.content) if m is None: name = '?' + loop.key = '?' 
else: - name = m.group(1) + " " + m.group(2) + name = m.group(2) + " " + m.group(3) + loop.key = m.group(1) opsno = loop.content.count("\n") lastline = loop.content[loop.content.rfind("\n", 0, len(loop.content) - 2):] m = re.search('descr= 20 and options.use_threshold: + counts.threshold = l[-20] + else: + counts.threshold = 0 + for_print = [(v, k) for k, v in counts.iteritems()] + for_print.sort() + else: + counts = {} log = logparser.parse_log_file(loopfile) loops = logparser.extract_category(log, "jit-log-opt-") real_loops, allloops = splitloops(loops) - postprocess(real_loops, allloops) + postprocess(real_loops, allloops, counts) if view: - Page(allloops).display() + Page(allloops, counts).display() if __name__ == '__main__': parser = optparse.OptionParser(usage=__doc__) + parser.add_option('--use-threshold', dest='use_threshold', + action="store_true") options, args = parser.parse_args(sys.argv) if len(args) != 2: print __doc__ sys.exit(1) - main(args[1]) + main(args[1], options.use_threshold) Modified: pypy/branch/jit-bounds/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/jit-bounds/pypy/module/_socket/interp_socket.py Wed Sep 1 18:02:00 2010 @@ -74,7 +74,11 @@ This is like connect(address), but returns an error code (the errno value) instead of raising an exception when an error occurs. """ - error = self.connect_ex(self.addr_from_object(space, w_addr)) + try: + addr = self.addr_from_object(space, w_addr) + except SocketError, e: + raise converted_error(space, e) + error = self.connect_ex(addr) return space.wrap(error) connect_ex_w.unwrap_spec = ['self', ObjSpace, W_Root] Modified: pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py Wed Sep 1 18:02:00 2010 @@ -339,6 +339,13 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() + + def test_socket_connect_ex(self): + import _socket + s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) + # Make sure we get an app-level error, not an interp one. 
+ raises(_socket.gaierror, s.connect_ex, ("wrong.invalid", 80)) + s.close() def test_socket_connect_typeerrors(self): tests = [ Modified: pypy/branch/jit-bounds/pypy/rlib/rlocale.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rlocale.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rlocale.py Wed Sep 1 18:02:00 2010 @@ -15,6 +15,12 @@ HAVE_LANGINFO = sys.platform != 'win32' HAVE_LIBINTL = sys.platform != 'win32' +if HAVE_LIBINTL: + try: + platform.verify_eci(ExternalCompilationInfo(includes=['libintl.h'])) + except platform.CompilationError: + HAVE_LIBINTL = False + class CConfig: includes = ['locale.h', 'limits.h'] Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py Wed Sep 1 18:02:00 2010 @@ -3,7 +3,8 @@ from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.memory.gc.base import MovingGCBase -from pypy.rlib.debug import ll_assert +from pypy.rlib.debug import ll_assert, have_debug_prints +from pypy.rlib.debug import debug_print, debug_start, debug_stop from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict @@ -98,8 +99,7 @@ self.space_size = self.param_space_size self.next_collect_after = self.param_space_size/2 # whatever... - if self.config.gcconfig.debugprint: - self.program_start_time = time.time() + self.program_start_time = time.time() self.space = llarena.arena_malloc(self.space_size, True) ll_assert(bool(self.space), "couldn't allocate arena") self.free = self.space @@ -289,15 +289,16 @@ return weakref_offsets def debug_collect_start(self): - if self.config.gcconfig.debugprint: - llop.debug_print(lltype.Void) - llop.debug_print(lltype.Void, - ".----------- Full collection ------------------") + if have_debug_prints(): + debug_start("gc-collect") + debug_print() + debug_print(".----------- Full collection ------------------") start_time = time.time() - return start_time + return start_time + return -1 def debug_collect_finish(self, start_time): - if self.config.gcconfig.debugprint: + if start_time != -1: end_time = time.time() elapsed_time = end_time - start_time self.total_collection_time += elapsed_time @@ -305,20 +306,16 @@ total_program_time = end_time - self.program_start_time ct = self.total_collection_time cc = self.total_collection_count - llop.debug_print(lltype.Void, - "| number of collections so far ", - cc) - llop.debug_print(lltype.Void, - "| total collections per second: ", - cc / total_program_time) - llop.debug_print(lltype.Void, - "| total time in markcompact-collect: ", - ct, "seconds") - llop.debug_print(lltype.Void, - "| percentage collection<->total time:", - ct * 100.0 / total_program_time, "%") - llop.debug_print(lltype.Void, - "`----------------------------------------------") + debug_print("| number of collections so far ", + cc) + debug_print("| total collections per second: ", + cc / total_program_time) + debug_print("| total time in markcompact-collect: ", + ct, "seconds") + debug_print("| percentage collection<->total time:", + ct * 100.0 / total_program_time, "%") + debug_print("`----------------------------------------------") + debug_stop("gc-collect") def 
update_run_finalizers(self): Modified: pypy/branch/jit-bounds/pypy/tool/runsubprocess.py ============================================================================== --- pypy/branch/jit-bounds/pypy/tool/runsubprocess.py (original) +++ pypy/branch/jit-bounds/pypy/tool/runsubprocess.py Wed Sep 1 18:02:00 2010 @@ -70,5 +70,5 @@ assert results.startswith('(') results = eval(results) if results[0] is None: - raise OSError(results[1]) + raise OSError('%s: %s' % (args[0], results[1])) return results Modified: pypy/branch/jit-bounds/pypy/translator/c/test/test_standalone.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/test/test_standalone.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/test/test_standalone.py Wed Sep 1 18:02:00 2010 @@ -604,6 +604,33 @@ out, err = cbuilder.cmdexec("a b") assert out == "3" + def test_gcc_options(self): + # check that the env var CC is correctly interpreted, even if + # it contains the compiler name followed by some options. + if sys.platform == 'win32': + py.test.skip("only for gcc") + + from pypy.rpython.lltypesystem import lltype, rffi + dir = udir.ensure('test_gcc_options', dir=1) + dir.join('someextraheader.h').write('#define someextrafunc() 42\n') + eci = ExternalCompilationInfo(includes=['someextraheader.h']) + someextrafunc = rffi.llexternal('someextrafunc', [], lltype.Signed, + compilation_info=eci) + + def entry_point(argv): + return someextrafunc() + + old_cc = os.environ.get('CC') + try: + os.environ['CC'] = 'gcc -I%s' % dir + t, cbuilder = self.compile(entry_point) + finally: + if old_cc is None: + del os.environ['CC'] + else: + os.environ['CC'] = old_cc + + class TestMaemo(TestStandalone): def setup_class(cls): py.test.skip("TestMaemo: tests skipped for now") Modified: pypy/branch/jit-bounds/pypy/translator/goal/app_main.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/goal/app_main.py (original) +++ pypy/branch/jit-bounds/pypy/translator/goal/app_main.py Wed Sep 1 18:02:00 2010 @@ -292,8 +292,8 @@ else: raise CommandLineError('unrecognized option %r' % (arg,)) i += 1 - sys.argv = argv[i:] - if not sys.argv: + sys.argv[:] = argv[i:] # don't change the list that sys.argv is bound to + if not sys.argv: # (relevant in case of "reload(sys)") sys.argv.append('') run_stdin = True return locals() @@ -478,6 +478,10 @@ reset.append(('PYTHONINSPECT', os.environ.get('PYTHONINSPECT', ''))) os.environ['PYTHONINSPECT'] = os.environ['PYTHONINSPECT_'] + # no one should change to which lists sys.argv and sys.path are bound + old_argv = sys.argv + old_path = sys.path + from pypy.module.sys.version import PYPY_VERSION sys.pypy_version_info = PYPY_VERSION sys.pypy_initial_path = pypy_initial_path @@ -490,3 +494,5 @@ sys.ps1 = '>>> ' # restore the normal ones, in case sys.ps2 = '... ' # we are dropping to CPython's prompt import os; os.environ.update(reset) + assert old_argv is sys.argv + assert old_path is sys.path Modified: pypy/branch/jit-bounds/pypy/translator/goal/test2/test_app_main.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/goal/test2/test_app_main.py (original) +++ pypy/branch/jit-bounds/pypy/translator/goal/test2/test_app_main.py Wed Sep 1 18:02:00 2010 @@ -1,6 +1,7 @@ """ Tests for the entry point of pypy-c, app_main.py. 
""" +from __future__ import with_statement import py import sys, os, re import autopath Modified: pypy/branch/jit-bounds/pypy/translator/goal/translate.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/goal/translate.py (original) +++ pypy/branch/jit-bounds/pypy/translator/goal/translate.py Wed Sep 1 18:02:00 2010 @@ -18,7 +18,7 @@ ArbitraryOption, StrOption, IntOption, Config, \ ChoiceOption, OptHelpFormatter from pypy.config.translationoption import get_combined_translation_config -from pypy.config.translationoption import set_opt_level +from pypy.config.translationoption import set_opt_level, final_check_config from pypy.config.translationoption import OPT_LEVELS, DEFAULT_OPT_LEVEL from pypy.config.translationoption import PLATFORMS, set_platform @@ -175,6 +175,9 @@ if 'handle_config' in targetspec_dic: targetspec_dic['handle_config'](config, translateconfig) + # perform checks (if any) on the final config + final_check_config(config) + if translateconfig.help: opt_parser.print_help() if 'print_help' in targetspec_dic: Modified: pypy/branch/jit-bounds/pypy/translator/platform/__init__.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/platform/__init__.py (original) +++ pypy/branch/jit-bounds/pypy/translator/platform/__init__.py Wed Sep 1 18:02:00 2010 @@ -99,15 +99,20 @@ self.__dict__ == other.__dict__) def key(self): - bits = [self.__class__.__name__, 'cc=%s' % self.cc] + bits = [self.__class__.__name__, 'cc=%r' % self.cc] for varname in self.relevant_environ: - bits.append('%s=%s' % (varname, os.environ.get(varname))) + bits.append('%s=%r' % (varname, os.environ.get(varname))) return ' '.join(bits) # some helpers which seem to be cross-platform enough def _execute_c_compiler(self, cc, args, outname, cwd=None): log.execute(cc + ' ' + ' '.join(args)) + # 'cc' can also contain some options for the C compiler; + # e.g. it can be "gcc -m32". We handle it by splitting on ' '. + cclist = cc.split() + cc = cclist[0] + args = cclist[1:] + args returncode, stdout, stderr = _run_subprocess(cc, args, self.c_environ, cwd) self._handle_error(returncode, stderr, stdout, outname) Modified: pypy/branch/jit-bounds/pypy/translator/platform/test/test_platform.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/platform/test/test_platform.py (original) +++ pypy/branch/jit-bounds/pypy/translator/platform/test/test_platform.py Wed Sep 1 18:02:00 2010 @@ -131,7 +131,7 @@ self.cc = 'xcc' x = XPlatform() res = x.key() - assert res.startswith('XPlatform cc=xcc CPATH=') + assert res.startswith("XPlatform cc='xcc' CPATH=") def test_equality(): class X(Platform): From cfbolz at codespeak.net Wed Sep 1 18:35:08 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 1 Sep 2010 18:35:08 +0200 (CEST) Subject: [pypy-svn] r76819 - pypy/branch/better-map-instances/pypy/module/__builtin__ Message-ID: <20100901163508.1C927282B9D@codespeak.net> Author: cfbolz Date: Wed Sep 1 18:35:06 2010 New Revision: 76819 Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Log: (arigo, cfbolz): refactor old-style classes to use more of the typedef.py machinery. this simplifies things and makes old-style classes able to use mapdict. 
Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Wed Sep 1 18:35:06 2010 @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped, applevel from pypy.interpreter.gateway import interp2app, ObjSpace -from pypy.interpreter.typedef import TypeDef, make_weakref_descr +from pypy.interpreter.typedef import TypeDef from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import Wrappable from pypy.rlib.rarithmetic import r_uint, intmask @@ -57,6 +57,14 @@ self.bases_w = bases self.w_dict = w_dict + def instantiate(self, space): + cache = space.fromcache(Cache) + if self.lookup(space, '__del__') is not None: + w_inst = cache.cls_with_del(space, self) + else: + w_inst = cache.cls_without_del(space, self) + return w_inst + def getdict(self): return self.w_dict @@ -100,15 +108,15 @@ return False @jit.unroll_safe - def lookup(self, space, w_attr): + def lookup(self, space, attr): # returns w_value or interplevel None - w_result = space.finditem(self.w_dict, w_attr) + w_result = space.finditem_str(self.w_dict, attr) if w_result is not None: return w_result for base in self.bases_w: # XXX fix annotation of bases_w to be a list of W_ClassObjects assert isinstance(base, W_ClassObject) - w_result = base.lookup(space, w_attr) + w_result = base.lookup(space, attr) if w_result is not None: return w_result return None @@ -122,7 +130,7 @@ return space.wrap(self.name) elif name == "__bases__": return space.newtuple(self.bases_w) - w_value = self.lookup(space, w_attr) + w_value = self.lookup(space, name) if w_value is None: raise operationerrfmt( space.w_AttributeError, @@ -147,7 +155,7 @@ self.setbases(space, w_value) return elif name == "__del__": - if self.lookup(space, w_attr) is None: + if self.lookup(space, name) is None: msg = ("a __del__ method added to an existing class " "will not be called") space.warn(msg, space.w_RuntimeWarning) @@ -195,13 +203,20 @@ # NOT_RPYTHON return '' % self.name +class Cache: + def __init__(self, space): + from pypy.interpreter.typedef import _usersubclswithfeature + # evil + self.cls_without_del = _usersubclswithfeature( + space.config, W_InstanceObject, "dict", "weakref") + self.cls_with_del = _usersubclswithfeature( + space.config, self.cls_without_del, "del") + + def class_descr_call(space, w_self, __args__): self = space.interp_w(W_ClassObject, w_self) - if self.lookup(space, space.wrap('__del__')) is not None: - w_inst = W_InstanceObjectWithDel(space, self) - else: - w_inst = W_InstanceObject(space, self) - w_init = w_inst.getattr(space, space.wrap('__init__'), False) + w_inst = self.instantiate(space) + w_init = w_inst.getattr(space, '__init__', False) if w_init is not None: w_result = space.call_args(w_init, __args__) if not space.is_w(w_result, space.w_None): @@ -234,7 +249,7 @@ def make_unary_instance_method(name): def unaryop(self, space): - w_meth = self.getattr(space, space.wrap(name), True) + w_meth = self.getattr(space, name, True) return space.call_function(w_meth) unaryop.func_name = name return unaryop @@ -242,7 +257,7 @@ def make_binary_returning_notimplemented_instance_method(name): def binaryop(self, space, w_other): try: - w_meth = self.getattr(space, 
space.wrap(name), False) + w_meth = self.getattr(space, name, False) except OperationError, e: if e.match(space, space.w_AttributeError): return space.w_NotImplemented @@ -267,7 +282,7 @@ w_a = self w_b = w_other if w_a is self: - w_meth = self.getattr(space, space.wrap(specialname), False) + w_meth = self.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) @@ -278,7 +293,7 @@ def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) if w_a is None or w_a is self: - w_meth = self.getattr(space, space.wrap(rspecialname), False) + w_meth = self.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_other) @@ -302,53 +317,40 @@ raise OperationError( space.w_TypeError, space.wrap("instance() first arg must be class")) - if space.is_w(w_dict, space.w_None): - w_dict = None - elif not space.is_true(space.isinstance(w_dict, space.w_dict)): - raise OperationError( - space.w_TypeError, - space.wrap("instance() second arg must be dictionary or None")) - return W_InstanceObject(space, w_class, w_dict) + w_result = w_class.instantiate(space) + if not space.is_w(w_dict, space.w_None): + w_result.setdict(space, w_dict) + return w_result class W_InstanceObject(Wrappable): - def __init__(self, space, w_class, w_dict=None): - if w_dict is None: - w_dict = space.newdict(instance=True) + def __init__(self, space, w_class): + # note that user_setup is overridden by the typedef.py machinery + self.user_setup(space, space.gettypeobject(self.typedef)) assert isinstance(w_class, W_ClassObject) self.w_class = w_class - self.w_dict = w_dict - self.space = space - - def getdict(self): - return self.w_dict - def setdict(self, space, w_dict): - if (w_dict is None or - not space.is_true(space.isinstance(w_dict, space.w_dict))): - raise OperationError( - space.w_TypeError, - space.wrap("__dict__ must be a dictionary object")) - self.w_dict = w_dict + def user_setup(self, space, w_subtype): + self.space = space - def setclass(self, space, w_class): + def set_oldstyle_class(self, space, w_class): if w_class is None or not isinstance(w_class, W_ClassObject): raise OperationError( space.w_TypeError, space.wrap("__class__ must be set to a class")) self.w_class = w_class - - def getattr(self, space, w_name, exc=True): - w_result = space.finditem(self.w_dict, w_name) + def getattr(self, space, name, exc=True): + assert isinstance(name, str) + w_result = self.getdictvalue(space, name) if w_result is not None: return w_result - w_value = self.w_class.lookup(space, w_name) + w_value = self.w_class.lookup(space, name) if w_value is None: if exc: raise operationerrfmt( space.w_AttributeError, "%s instance has no attribute '%s'", - self.w_class.name, space.str_w(w_name)) + self.w_class.name, name) else: return None w_descr_get = space.lookup(w_value, '__get__') @@ -360,32 +362,33 @@ name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': if name == "__dict__": - return self.w_dict + return self.getdict() elif name == "__class__": return self.w_class try: - return self.getattr(space, w_attr) + return self.getattr(space, name) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - w_meth = self.getattr(space, space.wrap('__getattr__'), False) + w_meth = self.getattr(space, '__getattr__', False) if w_meth is not None: return space.call_function(w_meth, w_attr) raise def descr_setattr(self, space, w_name, w_value): name = 
unwrap_attr(space, w_name) - w_meth = self.getattr(space, space.wrap('__setattr__'), False) + w_meth = self.getattr(space, '__setattr__', False) if name and name[0] == "_": if name == '__dict__': self.setdict(space, w_value) return if name == '__class__': - self.setclass(space, w_value) + self.set_oldstyle_class(space, w_value) return if name == '__del__' and w_meth is None: - if (not isinstance(self, W_InstanceObjectWithDel) - and space.finditem(self.w_dict, w_name) is None): + cache = space.fromcache(Cache) + if (not isinstance(self, cache.cls_with_del) + and self.getdictvalue(space, '__del__') is None): msg = ("a __del__ method added to an instance " "with no __del__ in the class will not be called") space.warn(msg, space.w_RuntimeWarning) @@ -402,10 +405,10 @@ self.setdict(space, None) return elif name == '__class__': - # use setclass to raise the error - self.setclass(space, None) + # use set_oldstyle_class to raise the error + self.set_oldstyle_class(space, None) return - w_meth = self.getattr(space, space.wrap('__delattr__'), False) + w_meth = self.getattr(space, '__delattr__', False) if w_meth is not None: space.call_function(w_meth, w_name) else: @@ -416,7 +419,7 @@ self.w_class.name, name) def descr_repr(self, space): - w_meth = self.getattr(space, space.wrap('__repr__'), False) + w_meth = self.getattr(space, '__repr__', False) if w_meth is None: w_class = self.w_class mod = w_class.get_module_string(space) @@ -424,19 +427,19 @@ return space.call_function(w_meth) def descr_str(self, space): - w_meth = self.getattr(space, space.wrap('__str__'), False) + w_meth = self.getattr(space, '__str__', False) if w_meth is None: return self.descr_repr(space) return space.call_function(w_meth) def descr_unicode(self, space): - w_meth = self.getattr(space, space.wrap('__unicode__'), False) + w_meth = self.getattr(space, '__unicode__', False) if w_meth is None: return self.descr_str(space) return space.call_function(w_meth) def descr_len(self, space): - w_meth = self.getattr(space, space.wrap('__len__')) + w_meth = self.getattr(space, '__len__') w_result = space.call_function(w_meth) if space.is_true(space.isinstance(w_result, space.w_int)): if space.is_true(space.lt(w_result, space.wrap(0))): @@ -449,22 +452,22 @@ space.wrap("__len__() should return an int")) def descr_getitem(self, space, w_key): - w_meth = self.getattr(space, space.wrap('__getitem__')) + w_meth = self.getattr(space, '__getitem__') return space.call_function(w_meth, w_key) def descr_setitem(self, space, w_key, w_value): - w_meth = self.getattr(space, space.wrap('__setitem__')) + w_meth = self.getattr(space, '__setitem__') space.call_function(w_meth, w_key, w_value) def descr_delitem(self, space, w_key): - w_meth = self.getattr(space, space.wrap('__delitem__')) + w_meth = self.getattr(space, '__delitem__') space.call_function(w_meth, w_key) def descr_iter(self, space): - w_meth = self.getattr(space, space.wrap('__iter__'), False) + w_meth = self.getattr(space, '__iter__', False) if w_meth is not None: return space.call_function(w_meth) - w_meth = self.getattr(space, space.wrap('__getitem__'), False) + w_meth = self.getattr(space, '__getitem__', False) if w_meth is None: raise OperationError( space.w_TypeError, @@ -474,14 +477,14 @@ # don't see the point def descr_getslice(self, space, w_i, w_j): - w_meth = self.getattr(space, space.wrap('__getslice__'), False) + w_meth = self.getattr(space, '__getslice__', False) if w_meth is not None: return space.call_function(w_meth, w_i, w_j) else: return space.getitem(self, 
space.newslice(w_i, w_j, space.w_None)) def descr_setslice(self, space, w_i, w_j, w_sequence): - w_meth = self.getattr(space, space.wrap('__setslice__'), False) + w_meth = self.getattr(space, '__setslice__', False) if w_meth is not None: space.call_function(w_meth, w_i, w_j, w_sequence) else: @@ -489,20 +492,20 @@ w_sequence) def descr_delslice(self, space, w_i, w_j): - w_meth = self.getattr(space, space.wrap('__delslice__'), False) + w_meth = self.getattr(space, '__delslice__', False) if w_meth is not None: space.call_function(w_meth, w_i, w_j) else: return space.delitem(self, space.newslice(w_i, w_j, space.w_None)) def descr_call(self, space, __args__): - w_meth = self.getattr(space, space.wrap('__call__')) + w_meth = self.getattr(space, '__call__') return space.call_args(w_meth, __args__) def descr_nonzero(self, space): - w_func = self.getattr(space, space.wrap('__nonzero__'), False) + w_func = self.getattr(space, '__nonzero__', False) if w_func is None: - w_func = self.getattr(space, space.wrap('__len__'), False) + w_func = self.getattr(space, '__len__', False) if w_func is None: return space.w_True w_result = space.call_function(w_func) @@ -526,7 +529,7 @@ not isinstance(w_b, W_InstanceObject)): return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): - w_func = w_a.getattr(space, space.wrap('__cmp__'), False) + w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: w_res = space.call_function(w_func, w_b) if space.is_w(w_res, space.w_NotImplemented): @@ -545,7 +548,7 @@ return space.wrap(-1) return space.wrap(0) if isinstance(w_b, W_InstanceObject): - w_func = w_b.getattr(space, space.wrap('__cmp__'), False) + w_func = w_b.getattr(space, '__cmp__', False) if w_func is not None: w_res = space.call_function(w_func, w_a) if space.is_w(w_res, space.w_NotImplemented): @@ -566,10 +569,10 @@ return space.w_NotImplemented def descr_hash(self, space): - w_func = self.getattr(space, space.wrap('__hash__'), False) + w_func = self.getattr(space, '__hash__', False) if w_func is None: - w_eq = self.getattr(space, space.wrap('__eq__'), False) - w_cmp = self.getattr(space, space.wrap('__cmp__'), False) + w_eq = self.getattr(space, '__eq__', False) + w_cmp = self.getattr(space, '__cmp__', False) if w_eq is not None or w_cmp is not None: raise OperationError(space.w_TypeError, space.wrap("unhashable instance")) @@ -584,7 +587,7 @@ return w_ret def descr_index(self, space): - w_func = self.getattr(space, space.wrap('__index__'), False) + w_func = self.getattr(space, '__index__', False) if w_func is not None: return space.call_function(w_func) raise OperationError( @@ -592,7 +595,7 @@ space.wrap("object cannot be interpreted as an index")) def descr_contains(self, space, w_obj): - w_func = self.getattr(space, space.wrap('__contains__'), False) + w_func = self.getattr(space, '__contains__', False) if w_func is not None: return space.wrap(space.is_true(space.call_function(w_func, w_obj))) # now do it ourselves @@ -615,7 +618,7 @@ w_a = self w_b = w_other if w_a is self: - w_func = self.getattr(space, space.wrap('__pow__'), False) + w_func = self.getattr(space, '__pow__', False) if w_func is not None: return space.call_function(w_func, w_other) return space.w_NotImplemented @@ -623,7 +626,7 @@ return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case - w_func = self.getattr(space, space.wrap('__pow__'), False) + w_func = self.getattr(space, '__pow__', False) if w_func is not None: return space.call_function(w_func, w_other, w_modulo) return 
space.w_NotImplemented @@ -635,7 +638,7 @@ w_a = self w_b = w_other if w_a is self: - w_func = self.getattr(space, space.wrap('__rpow__'), False) + w_func = self.getattr(space, '__rpow__', False) if w_func is not None: return space.call_function(w_func, w_other) return space.w_NotImplemented @@ -643,13 +646,13 @@ return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case - w_func = self.getattr(space, space.wrap('__rpow__'), False) + w_func = self.getattr(space, '__rpow__', False) if w_func is not None: return space.call_function(w_func, w_other, w_modulo) return space.w_NotImplemented def descr_next(self, space): - w_func = self.getattr(space, space.wrap('next'), False) + w_func = self.getattr(space, 'next', False) if w_func is None: raise OperationError(space.w_TypeError, space.wrap("instance has no next() method")) @@ -658,7 +661,7 @@ def descr_del(self, space): # Note that this is called from executioncontext.UserDelAction # via the space.userdel() method. - w_func = self.getattr(space, space.wrap('__del__'), False) + w_func = self.getattr(space, '__del__', False) if w_func is not None: space.call_function(w_func) @@ -752,13 +755,8 @@ unwrap_spec=['self', ObjSpace, W_Root, W_Root]), next = interp2app(W_InstanceObject.descr_next, unwrap_spec=['self', ObjSpace]), - __weakref__ = make_weakref_descr(W_InstanceObject), __del__ = interp2app(W_InstanceObject.descr_del, unwrap_spec=['self', ObjSpace]), **rawdict ) W_InstanceObject.typedef.acceptable_as_base_class = False - -class W_InstanceObjectWithDel(W_InstanceObject): - def __del__(self): - self._enqueue_for_destruction(self.space) From antocuni at codespeak.net Wed Sep 1 18:48:19 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 1 Sep 2010 18:48:19 +0200 (CEST) Subject: [pypy-svn] r76820 - pypy/build/bot2/pypybuildbot/test Message-ID: <20100901164819.DD418282B9D@codespeak.net> Author: antocuni Date: Wed Sep 1 18:48:18 2010 New Revision: 76820 Modified: pypy/build/bot2/pypybuildbot/test/test_summary.py Log: make sure that tests pass with buildbot 0.7.12, i.e. 
the version used by buildmaster on codespeak (which incidentally is also the version bundled with ubuntu) Modified: pypy/build/bot2/pypybuildbot/test/test_summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_summary.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_summary.py Wed Sep 1 18:48:18 2010 @@ -289,6 +289,7 @@ def _BuilderToStatus(status): setup = {'name': 'builder', 'builddir': 'BUILDDIR', + 'slavebuilddir': 'SLAVEBUILDDIR', 'factory': process_factory.BuildFactory() } return process_builder.Builder(setup, status) @@ -363,7 +364,7 @@ step.finished = t + (n+1)*60 t = step.finished + 30 build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) n += 1 builder.nextBuildNumber = n @@ -387,7 +388,7 @@ build = status_builder.BuildStatus(builder, 0) build.started = time.time() build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) builder.nextBuildNumber = len(builder.buildCache) s = summary.Summary() @@ -404,7 +405,7 @@ build.started = time.time() build.setProperty('got_revision', '50000', None) build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) builder.nextBuildNumber = len(builder.buildCache) s = summary.Summary() @@ -430,7 +431,7 @@ step1.setText(['other', 'borken']) step1.stepFinished(summary.FAILURE) build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) builder.nextBuildNumber = len(builder.buildCache) s = summary.Summary() @@ -614,7 +615,7 @@ step2.setText(["pytest2", "aborted"]) step2.stepFinished(summary.EXCEPTION) build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) builder.nextBuildNumber = 1 s = summary.Summary() @@ -637,7 +638,7 @@ step.setText(["pytest", "failed slave lost"]) step.stepFinished(summary.FAILURE) build.buildFinished() - builder.addBuildToCache(build) + builder.touchBuildCache(build) builder.nextBuildNumber = 1 s = summary.Summary() From hakanardo at codespeak.net Wed Sep 1 19:23:32 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Wed, 1 Sep 2010 19:23:32 +0200 (CEST) Subject: [pypy-svn] r76821 - in pypy/branch/jit-bounds/pypy/jit/metainterp: . 
optimizeopt test Message-ID: <20100901172332.A3251282B9D@codespeak.net> Author: hakanardo Date: Wed Sep 1 19:23:29 2010 New Revision: 76821 Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed) - copied, changed from r76817, pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py Removed: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt.py Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py Log: Converted optimizeopt into a package and started to split up into several separated optimizations Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py Wed Sep 1 19:23:29 2010 @@ -0,0 +1,21 @@ +from optimizer import Optimizer +from rewrite import Rewrite + +def optimize_loop_1(metainterp_sd, loop, virtuals=True): + """Optimize loop.operations to make it match the input of loop.specnodes + and to remove internal overheadish operations. Note that loop.specnodes + must be applicable to the loop; you will probably get an AssertionError + if not. + """ + optimizations = (Rewrite(),) + optimizer = Optimizer(metainterp_sd, loop, optimizations) + if virtuals: + optimizer.setup_virtuals_and_constants() + optimizer.propagate_all_forward() + +def optimize_bridge_1(metainterp_sd, bridge): + """The same, but for a bridge. The only difference is that we don't + expect 'specnodes' on the bridge. 
+ """ + optimize_loop_1(metainterp_sd, bridge, False) + Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py Wed Sep 1 19:23:29 2010 @@ -0,0 +1,6 @@ +from optimizer import Optimization + +class Heap(Optimization): + """Cache repeated heap accesses""" + # FIXME: Move here + Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py Wed Sep 1 19:23:29 2010 @@ -0,0 +1,7 @@ +from optimizer import Optimization + +class IntBounds(Optimization): + """Keeps track of the bounds placed on integers by the guards and + remove redundant guards""" + # FIXME: Move here + Copied: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (from r76817, pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt.py) ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 1 19:23:29 2010 @@ -1,7 +1,7 @@ from pypy.jit.metainterp.history import Box, BoxInt, LoopToken, BoxFloat,\ ConstFloat from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstObj, REF -from pypy.jit.metainterp.resoperation import rop, ResOperation, opboolinvers, opboolreflex +from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp import jitprof from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode @@ -23,25 +23,6 @@ MAXINT = sys.maxint MININT = -sys.maxint - 1 -def optimize_loop_1(metainterp_sd, loop): - """Optimize loop.operations to make it match the input of loop.specnodes - and to remove internal overheadish operations. Note that loop.specnodes - must be applicable to the loop; you will probably get an AssertionError - if not. - """ - optimizer = Optimizer(metainterp_sd, loop) - optimizer.setup_virtuals_and_constants() - optimizer.propagate_forward() - -def optimize_bridge_1(metainterp_sd, bridge): - """The same, but for a bridge. The only difference is that we don't - expect 'specnodes' on the bridge. 
- """ - optimizer = Optimizer(metainterp_sd, bridge) - optimizer.propagate_forward() - -# ____________________________________________________________ - class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -597,10 +578,26 @@ subspecnode = self.items[index] subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs) +class Optimization(object): + def propagate_forward(self, op): + raise NotImplemented + + def emit_operation(self, op): + self.next_optimization.propagate_forward(op) + + def getvalue(self, box): + return self.optimizer.getvalue(box) + + def make_constant(self, box, constbox): + return self.optimizer.make_constant(box, constbox) + + def make_equal_to(self, box, value): + return self.optimizer.make_equal_to(box, value) + -class Optimizer(object): +class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop): + def __init__(self, metainterp_sd, loop, optimizations=()): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -612,6 +609,16 @@ self.loop_invariant_results = {} self.pure_operations = args_dict() self.producer = {} + + if len(optimizations) == 0: + self.first_optimization = self + else: + self.first_optimization = optimizations[0] + for i in range(1, len(optimizations)): + optimizations[i - 1].next_optimization = optimizations[i] + optimizations[-1].next_optimization = self + for o in optimizations: + o.optimizer = self def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS) @@ -723,25 +730,29 @@ # ---------- - def propagate_forward(self): + def propagate_all_forward(self): self.exception_might_have_happened = False self.newoperations = [] self.i = 0 while self.i < len(self.loop.operations): op = self.loop.operations[self.i] - self.producer[op.result] = op - opnum = op.opnum - for value, func in optimize_ops: - if opnum == value: - func(self, op) - break - else: - self.optimize_default(op) + self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + def propagate_forward(self, op): + self.producer[op.result] = op + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.optimize_default(op) + + def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt # but the bounds produced by all instructions where box is @@ -848,59 +859,11 @@ if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard return - elif self.find_rewritable_bool(op, args): - return else: self.pure_operations[args] = op # otherwise, the operation remains self.emit_operation(op) - - - def try_boolinvers(self, op, targs): - oldop = self.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: - value = self.getvalue(oldop.result) - if value.is_constant(): - if value.box.same_constant(CONST_1): - self.make_constant(op.result, CONST_0) - return True - elif value.box.same_constant(CONST_0): - self.make_constant(op.result, CONST_1) - return True - - return False - - - def find_rewritable_bool(self, op, args): - try: - oldopnum = opboolinvers[op.opnum] - targs = [args[0], args[1], ConstInt(oldopnum)] - if self.try_boolinvers(op, targs): - return True - except KeyError: - pass - - try: - oldopnum = opboolreflex[op.opnum] - targs = [args[1], args[0], ConstInt(oldopnum)] - oldop = self.pure_operations.get(targs, None) - if 
oldop is not None and oldop.descr is op.descr: - self.make_equal_to(op.result, self.getvalue(oldop.result)) - return True - except KeyError: - pass - - try: - oldopnum = opboolinvers[opboolreflex[op.opnum]] - targs = [args[1], args[0], ConstInt(oldopnum)] - if self.try_boolinvers(op, targs): - return True - except KeyError: - pass - - return False - def optimize_JUMP(self, op): orgop = self.loop.operations[-1] Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Wed Sep 1 19:23:29 2010 @@ -0,0 +1,63 @@ +from optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex +from pypy.jit.metainterp.history import ConstInt + +class Rewrite(Optimization): + """Rewrite operations into equvivialent, already executed operations + or constants. + """ + + def propagate_forward(self, op): + args = self.optimizer.make_args_key(op) + if self.find_rewritable_bool(op, args): + return + self.emit_operation(op) + + def try_boolinvers(self, op, targs): + oldop = self.optimizer.pure_operations.get(targs, None) + if oldop is not None and oldop.descr is op.descr: + value = self.getvalue(oldop.result) + if value.is_constant(): + if value.box.same_constant(CONST_1): + self.make_constant(op.result, CONST_0) + return True + elif value.box.same_constant(CONST_0): + self.make_constant(op.result, CONST_1) + return True + + return False + + + def find_rewritable_bool(self, op, args): + try: + oldopnum = opboolinvers[op.opnum] + targs = [args[0], args[1], ConstInt(oldopnum)] + if self.try_boolinvers(op, targs): + return True + except KeyError: + pass + + try: + oldopnum = opboolreflex[op.opnum] + targs = [args[1], args[0], ConstInt(oldopnum)] + oldop = self.optimizer.pure_operations.get(targs, None) + if oldop is not None and oldop.descr is op.descr: + self.make_equal_to(op.result, self.getvalue(oldop.result)) + return True + except KeyError: + pass + + try: + oldopnum = opboolinvers[opboolreflex[op.opnum]] + targs = [args[1], args[0], ConstInt(oldopnum)] + if self.try_boolinvers(op, targs): + return True + except KeyError: + pass + + return False + + + + + Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 1 19:23:29 2010 @@ -0,0 +1,6 @@ +from optimizer import Optimization + +class Virtualize(Optimization): + "Virtualize objects until they escape." 
+ # FIXME: Move here + Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py Wed Sep 1 19:23:29 2010 @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.optimizeopt import IntBound, IntUpperBound, \ +from pypy.jit.metainterp.optimizeopt.optimizer import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded from copy import copy Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 1 19:23:29 2010 @@ -4,7 +4,7 @@ #OOtypeMixin, BaseTest) from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder -from pypy.jit.metainterp import optimizeopt +import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt from pypy.jit.metainterp.optimizeopt import optimize_loop_1 from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py Wed Sep 1 19:23:29 2010 @@ -1,7 +1,7 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi -from pypy.jit.metainterp.optimizeopt import VirtualValue, OptValue, VArrayValue -from pypy.jit.metainterp.optimizeopt import VStructValue +from pypy.jit.metainterp.optimizeopt.optimizer import VirtualValue, OptValue, VArrayValue +from pypy.jit.metainterp.optimizeopt.optimizer import VStructValue from pypy.jit.metainterp.resume import * from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt from pypy.jit.metainterp.history import ConstPtr, ConstFloat From arigo at codespeak.net Wed Sep 1 19:34:37 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 1 Sep 2010 19:34:37 +0200 (CEST) Subject: [pypy-svn] r76822 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100901173437.C51A9282B9D@codespeak.net> Author: arigo Date: Wed Sep 1 19:34:36 2010 New Revision: 76822 Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py Log: Add two caches when making new types, for Ptr() and for FixedSizeArray(1). These have been found to be consuming really a lot of memory. Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lltype.py Wed Sep 1 19:34:36 2010 @@ -401,6 +401,19 @@ # behaves more or less like a Struct with fields item0, item1, ... # but also supports __getitem__(), __setitem__(), __len__(). 
+ _cache = weakref.WeakKeyDictionary() # cache the length-1 FixedSizeArrays + def __new__(cls, OF, length, **kwds): + if length == 1 and not kwds: + try: + obj = FixedSizeArray._cache[OF] + except KeyError: + obj = FixedSizeArray._cache[OF] = Struct.__new__(cls) + except TypeError: + obj = Struct.__new__(cls) + else: + obj = Struct.__new__(cls) + return obj + def __init__(self, OF, length, **kwds): fields = [('item%d' % i, OF) for i in range(length)] super(FixedSizeArray, self).__init__('array%d' % length, *fields, @@ -610,6 +623,16 @@ class Ptr(LowLevelType): __name__ = property(lambda self: '%sPtr' % self.TO.__name__) + _cache = weakref.WeakKeyDictionary() # cache the Ptrs + def __new__(cls, TO): + try: + obj = Ptr._cache[TO] + except KeyError: + obj = Ptr._cache[TO] = LowLevelType.__new__(cls) + except TypeError: + obj = LowLevelType.__new__(cls) + return obj + def __init__(self, TO): if not isinstance(TO, ContainerType): raise TypeError, ("can only point to a Container type, " From arigo at codespeak.net Wed Sep 1 19:55:49 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 1 Sep 2010 19:55:49 +0200 (CEST) Subject: [pypy-svn] r76823 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100901175549.7C8B0282B9D@codespeak.net> Author: arigo Date: Wed Sep 1 19:55:47 2010 New Revision: 76823 Modified: pypy/trunk/pypy/rpython/lltypesystem/llmemory.py Log: Add a __slots__ declaration here, hoping to save 45MB in a "translate.py -Ojit". Modified: pypy/trunk/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llmemory.py Wed Sep 1 19:55:47 2010 @@ -389,6 +389,7 @@ # ------------------------------------------------------------- class fakeaddress(object): + __slots__ = ['ptr'] # NOTE: the 'ptr' in the addresses must be normalized. # Use cast_ptr_to_adr() instead of directly fakeaddress() if unsure. def __init__(self, ptr): @@ -530,7 +531,6 @@ pass NULL = fakeaddress(None) -NULL.intaddress = 0 # this is to make memory.lladdress more happy Address = lltype.Primitive("Address", NULL) # GCREF is similar to Address but it is GC-aware From arigo at codespeak.net Wed Sep 1 19:58:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 1 Sep 2010 19:58:44 +0200 (CEST) Subject: [pypy-svn] r76824 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100901175844.481ED282B9D@codespeak.net> Author: arigo Date: Wed Sep 1 19:58:42 2010 New Revision: 76824 Modified: pypy/trunk/pypy/rpython/lltypesystem/llgroup.py Log: Add another __slots__, worth 14.5 MB. I'm stopping here. Modified: pypy/trunk/pypy/rpython/lltypesystem/llgroup.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llgroup.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llgroup.py Wed Sep 1 19:58:42 2010 @@ -99,6 +99,7 @@ '&~0xFFFF' or with a direct masking like '&0x10000' (resp. on 64-bit platform, with '&~0xFFFFFFFF' or '&0x100000000'). """ + __slots__ = ['lowpart', 'rest'] MASK = (1< Author: arigo Date: Wed Sep 1 20:07:14 2010 New Revision: 76825 Modified: pypy/trunk/pypy/translator/c/database.py pypy/trunk/pypy/translator/c/gc.py pypy/trunk/pypy/translator/c/node.py Log: Move around the time at which string attributes are computed: * on the DefNodes, compute them once and cache them, instead of recomputing every time someone asks. 
Avoids ending up with tons of copies of the same string in the process. * on the ContainerNodes, do the exact opposite: instead of caching 'ptrname', compute it whenever asked, because it will typically be used to build some larger string sent to C and then forgotten. Modified: pypy/trunk/pypy/translator/c/database.py ============================================================================== --- pypy/trunk/pypy/translator/c/database.py (original) +++ pypy/trunk/pypy/translator/c/database.py Wed Sep 1 20:07:14 2010 @@ -213,7 +213,7 @@ forcename = self.idelayedfunctionnames[obj][0] node = self.getcontainernode(container, forcename=forcename) - assert node.ptrname == forcename + assert node.getptrname() == forcename return forcename # /hack hack hack @@ -222,7 +222,7 @@ return '((%s) %d)' % (cdecl(self.gettype(T), ''), obj._obj) node = self.getcontainernode(container) - return node.ptrname + return node.getptrname() else: return '((%s) NULL)' % (cdecl(self.gettype(T), ''), ) else: Modified: pypy/trunk/pypy/translator/c/gc.py ============================================================================== --- pypy/trunk/pypy/translator/c/gc.py (original) +++ pypy/trunk/pypy/translator/c/gc.py Wed Sep 1 20:07:14 2010 @@ -172,7 +172,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = 'void (@)(void *)' self.name = defnode.gcinfo.static_deallocator - self.ptrname = '((void (*)(void *)) %s)' % (self.name,) + + def getptrname(self): + return '((void (*)(void *)) %s)' % (self.name,) def enum_dependencies(self): return [] @@ -266,7 +268,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = self.typename self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename) - self.ptrname = '(&%s)' % (self.name,) + + def getptrname(self): + return '(&%s)' % (self.name,) def enum_dependencies(self): return [] Modified: pypy/trunk/pypy/translator/c/node.py ============================================================================== --- pypy/trunk/pypy/translator/c/node.py (original) +++ pypy/trunk/pypy/translator/c/node.py Wed Sep 1 20:07:14 2010 @@ -77,6 +77,8 @@ if db.gcpolicy.need_no_typeptr(): assert self.fieldnames == ('typeptr',) self.fieldnames = () + # + self.fulltypename = '%s %s @' % (self.typetag, self.name) def setup(self): # this computes self.fields @@ -119,7 +121,7 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def c_struct_field_name(self, name): # occasionally overridden in __init__(): @@ -211,6 +213,8 @@ self.name) = db.namespace.uniquename(basename, with_number=with_number, bare=True) self.dependencies = {} + self.fulltypename = '%s %s @' % (self.typetag, self.name) + self.fullptrtypename = '%s %s *@' % (self.typetag, self.name) def setup(self): if hasattr(self, 'itemtypename'): @@ -236,10 +240,10 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def getptrtype(self): - return '%s %s *@' % (self.typetag, self.name) + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s.items[%s]' % (baseexpr, index) @@ -336,16 +340,19 @@ if ARRAY._hints.get("render_as_void"): contained_type = Void self.itemtypename = db.gettype(contained_type, who_asks=self) + self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' % + (self.varlength,)) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Array loops are forbidden by 
ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - return self.itemtypename.replace('@', '(@)[%d]' % (self.varlength,)) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s[%d]' % (baseexpr, index) @@ -383,17 +390,19 @@ self.LLTYPE = FIXEDARRAY self.dependencies = {} self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self) + self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' % + FIXEDARRAY.length) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Loops are forbidden by ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - FIXEDARRAY = self.FIXEDARRAY - return self.itemtypename.replace('@', '(@)[%d]' % FIXEDARRAY.length) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index, dummy=False): if not isinstance(index, int): @@ -469,7 +478,7 @@ if USESLOTS: # keep the number of slots down! __slots__ = """db obj typename implementationtypename - name ptrname + name globalcontainer""".split() eci_name = '_compilation_info' @@ -494,7 +503,9 @@ if self.typename != self.implementationtypename: if db.gettypedefnode(T).extra_union_for_varlength: self.name += '.b' - self.ptrname = '(&%s)' % self.name + + def getptrname(self): + return '(&%s)' % self.name def getTYPE(self): return typeOf(self.obj) @@ -667,10 +678,10 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if barebonearray(T): - self.ptrname = self.name + def getptrname(self): + if barebonearray(self.getTYPE()): + return self.name + return ContainerNode.getptrname(self) def basename(self): return 'array' @@ -728,10 +739,10 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if not isinstance(obj, _subarray): # XXX hackish - self.ptrname = self.name + def getptrname(self): + if not isinstance(self.obj, _subarray): # XXX hackish + return self.name + return ContainerNode.getptrname(self) def basename(self): T = self.getTYPE() @@ -812,7 +823,9 @@ self.make_funcgens() #self.dependencies = {} self.typename = db.gettype(T) #, who_asks=self) - self.ptrname = self.name + + def getptrname(self): + return self.name def make_funcgens(self): self.funcgens = select_function_code_generators(self.obj, self.db, self.name) @@ -958,7 +971,7 @@ def startupcode(self): T = self.getTYPE() - args = [self.ptrname] + args = [self.getptrname()] # XXX how to make this code more generic? if T.tag == 'ThreadLock': lock = self.obj.externalobj @@ -990,13 +1003,15 @@ self.obj = obj value = obj.value self.name = self._python_c_name(value) - self.ptrname = self.name self.exported_name = self.name # a list of expressions giving places where this constant PyObject # must be copied. 
Normally just in the global variable of the same # name, but see also StructNode.initializationexpr() :-( self.where_to_copy_me = [] + def getptrname(self): + return self.name + def _python_c_name(self, value): # just some minimal cases: None and builtin exceptions if value is None: From arigo at codespeak.net Wed Sep 1 20:07:37 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 1 Sep 2010 20:07:37 +0200 (CEST) Subject: [pypy-svn] r76826 - pypy/trunk/pypy/interpreter Message-ID: <20100901180737.6CC9B282B9D@codespeak.net> Author: arigo Date: Wed Sep 1 20:07:34 2010 New Revision: 76826 Modified: pypy/trunk/pypy/interpreter/argument.py Log: Fix Signature.__eq__(). Modified: pypy/trunk/pypy/interpreter/argument.py ============================================================================== --- pypy/trunk/pypy/interpreter/argument.py (original) +++ pypy/trunk/pypy/interpreter/argument.py Wed Sep 1 20:07:34 2010 @@ -52,11 +52,15 @@ self.argnames, self.varargname, self.kwargname) def __eq__(self, other): + if not isinstance(other, Signature): + return NotImplemented return (self.argnames == other.argnames and self.varargname == other.varargname and self.kwargname == other.kwargname) def __ne__(self, other): + if not isinstance(other, Signature): + return NotImplemented return not self == other From wlav at codespeak.net Thu Sep 2 02:20:21 2010 From: wlav at codespeak.net (wlav at codespeak.net) Date: Thu, 2 Sep 2010 02:20:21 +0200 (CEST) Subject: [pypy-svn] r76827 - in pypy/branch/reflex-support/pypy/module/cppyy: . test Message-ID: <20100902002021.F16D8282B9C@codespeak.net> Author: wlav Date: Thu Sep 2 02:20:18 2010 New Revision: 76827 Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py pypy/branch/reflex-support/pypy/module/cppyy/executor.py pypy/branch/reflex-support/pypy/module/cppyy/helper.py pypy/branch/reflex-support/pypy/module/cppyy/test/datatypes.h pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py pypy/branch/reflex-support/pypy/module/cppyy/test/test_helper.py Log: Initial implementation for passing short int arrays. Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/converter.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/converter.py Thu Sep 2 02:20:18 2010 @@ -3,6 +3,8 @@ from pypy.rlib.rarithmetic import r_singlefloat from pypy.objspace.std.intobject import W_IntObject +from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module.cppyy import helper, capi _converters = {} @@ -166,6 +168,41 @@ return rffi.cast(rffi.VOIDP, x) +class ShortPtrConverter(TypeConverter): + _immutable_ = True + def __init__(self, detail=None): + if detail is None: + import sys + detail = sys.maxint + self.size = detail + + def convert_argument(self, space, w_obj): + assert "not yet implemented" + + def from_memory(self, space, w_obj, offset): + # read access, so no copy needed + fieldptr = self._get_fieldptr(space, w_obj, offset) + shortptr = rffi.cast(rffi.SHORTP, fieldptr) + w_array = unpack_simple_shape(space, space.wrap('h')) + return w_array.fromaddress(space, shortptr, self.size) + + def to_memory(self, space, w_obj, w_value, offset): + # copy only the pointer value + obj = space.interpclass_w(space.findattr(w_obj, space.wrap("_cppinstance"))) + byteptr = rffi.cast(rffi.LONGP, obj.rawobject[offset]) + # TODO: now what ... ?? 
AFAICS, w_value is a pure python list, not an array? +# byteptr[0] = space.unwrap(space.id(w_value.getslotvalue(2))) + + +class ShortArrayConverter(ShortPtrConverter): + def to_memory(self, space, w_obj, w_value, offset): + # copy the full array (uses byte copy for now) + fieldptr = self._get_fieldptr(space, w_obj, offset) + value = w_value.getslotvalue(2) + for i in range(min(self.size*2, value.getlength())): + fieldptr[i] = value.getitem(i) + + class InstancePtrConverter(TypeConverter): _immutable_ = True def __init__(self, space, cpptype): @@ -199,13 +236,23 @@ # 5) generalized cases (covers basically all user classes) # 6) void converter, which fails on use + # 1) full, exact match try: - return _converters[name] + return _converters[name]() except KeyError: pass - compound = helper.compound(name) - cpptype = interp_cppyy.type_byname(space, helper.clean_type(name)) + # 2) match of decorated, unqualified type + compound, detail = helper.compound(name) + clean_name = helper.clean_type(name) + try: + if detail: + return _converters[clean_name+compound](detail) + return _converters[clean_name+compound]() + except KeyError: + pass + + cpptype = interp_cppyy.type_byname(space, clean_name) if compound == "*": return InstancePtrConverter(space, cpptype) @@ -214,15 +261,17 @@ return VoidConverter(space, name) -_converters["bool"] = BoolConverter() -_converters["char"] = CharConverter() -_converters["unsigned char"] = CharConverter() -_converters["short int"] = ShortConverter() -_converters["unsigned short int"] = ShortConverter() -_converters["int"] = LongConverter() -_converters["unsigned int"] = LongConverter() -_converters["long int"] = LongConverter() -_converters["unsigned long int"] = LongConverter() -_converters["float"] = FloatConverter() -_converters["double"] = DoubleConverter() -_converters["const char*"] = CStringConverter() +_converters["bool"] = BoolConverter +_converters["char"] = CharConverter +_converters["unsigned char"] = CharConverter +_converters["short int"] = ShortConverter +_converters["short int*"] = ShortPtrConverter +_converters["short int[]"] = ShortArrayConverter +_converters["unsigned short int"] = ShortConverter +_converters["int"] = LongConverter +_converters["unsigned int"] = LongConverter +_converters["long int"] = LongConverter +_converters["unsigned long int"] = LongConverter +_converters["float"] = FloatConverter +_converters["double"] = DoubleConverter +_converters["const char*"] = CStringConverter Modified: pypy/branch/reflex-support/pypy/module/cppyy/executor.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/executor.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/executor.py Thu Sep 2 02:20:18 2010 @@ -75,7 +75,7 @@ except KeyError: pass - compound = helper.compound(name) + compound, detail = helper.compound(name) cpptype = interp_cppyy.type_byname(space, helper.clean_type(name)) if compound == "*": return InstancePtrExecutor(space, cpptype) Modified: pypy/branch/reflex-support/pypy/module/cppyy/helper.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/helper.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/helper.py Thu Sep 2 02:20:18 2010 @@ -4,8 +4,11 @@ name = "".join(rstring.split(name, "const")) # poor man's replace i = _find_qualifier_index(name) if name[-1] == "]": # array type - return "*" - return "".join(name[i:].split(" ")) + for i in range(len(name) - 1, 
-1, -1): + c = name[i] + if c == "[": + return "[]", int(name[i+1:-1]) + return "".join(name[i:].split(" ")), None def _find_qualifier_index(name): i = len(name) Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/datatypes.h ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/datatypes.h (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/datatypes.h Thu Sep 2 02:20:18 2010 @@ -14,6 +14,9 @@ cppyy_test_data(); ~cppyy_test_data(); +// helper + void destroy_arrays(); + // getters bool get_bool(); char get_char(); @@ -107,9 +110,6 @@ static double s_double; private: - void destroy_arrays(); - -private: bool m_owns_arrays; }; Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Thu Sep 2 02:20:18 2010 @@ -16,11 +16,10 @@ raise OSError("'make' failed (see stderr)") class AppTestDATATYPES: - N = 5 # should be imported from the dictionary - def setup_class(cls): cls.space = space env = os.environ + cls.w_N = space.wrap(5) # should be imported from the dictionary cls.w_shared_lib = space.wrap(shared_lib) cls.w_datatypes = cls.space.appexec([], """(): import cppyy @@ -93,23 +92,22 @@ c.m_double = 0.456; assert round(c.get_double() - 0.456, 8) == 0 c.set_double( 0.567 ); assert round(c.m_double - 0.567, 8) == 0 - """ # arrays; there will be pointer copies, so destroy the current ones c.destroy_arrays() # integer arrays - a = range(N) - atypes = [ 'h', 'H', 'i', 'I', 'l', 'L' ] - for j in range(len(names)): - b = array(atypes[j], a) + import array + a = range(self.N) + atypes = [ 'h' ] #, 'H', 'i', 'I', 'l', 'L' ] + for j in range(len(atypes)):#names)): + b = array.array(atypes[j], a) exec 'c.m_%s_array = b' % names[j] # buffer copies - for i in range(N): + for i in range(self.N): assert eval('c.m_%s_array[i]' % names[j]) == b[i] - exec 'c.m_%s_array2 = b' % names[j] # pointer copies - b[i] = 28 - for i in range(N): - assert eval('c.m_%s_array2[i]' % names[j]) == b[i] - """ +# exec 'c.m_%s_array2 = b' % names[j] # pointer copies +# b[i] = 28 +# for i in range(self.N): +# assert eval('c.m_%s_array2[i]' % names[j]) == b[i] c.destruct() Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_helper.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_helper.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_helper.py Thu Sep 2 02:20:18 2010 @@ -1,11 +1,13 @@ from pypy.module.cppyy import helper def test_compound(): - assert helper.compound("int*") == "*" - assert helper.compound("int* const *&") == "**&" - assert helper.compound("std::vector*") == "*" + assert helper.compound("int*") == ("*", None) + assert helper.compound("int* const *&") == ("**&", None) + assert helper.compound("std::vector*") == ("*", None) + assert helper.compound("unsigned long int[5]") == ("[]", 5) def test_clean_type(): assert helper.clean_type(" int***") == "int" assert helper.clean_type("std::vector&") == "std::vector" + assert helper.clean_type("unsigned short int[3]") == "unsigned short int" From wlav at codespeak.net Thu Sep 2 02:31:11 2010 From: wlav at codespeak.net (wlav at codespeak.net) Date: Thu, 2 Sep 2010 02:31:11 +0200 (CEST) Subject: 
[pypy-svn] r76828 - in pypy/branch/reflex-support/pypy/module/cppyy: . test Message-ID: <20100902003111.108BE282B9C@codespeak.net> Author: wlav Date: Thu Sep 2 02:31:09 2010 New Revision: 76828 Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Log: Initial support for unsigned short int data member access. Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/converter.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/converter.py Thu Sep 2 02:31:09 2010 @@ -261,17 +261,19 @@ return VoidConverter(space, name) -_converters["bool"] = BoolConverter -_converters["char"] = CharConverter -_converters["unsigned char"] = CharConverter -_converters["short int"] = ShortConverter -_converters["short int*"] = ShortPtrConverter -_converters["short int[]"] = ShortArrayConverter -_converters["unsigned short int"] = ShortConverter -_converters["int"] = LongConverter -_converters["unsigned int"] = LongConverter -_converters["long int"] = LongConverter -_converters["unsigned long int"] = LongConverter -_converters["float"] = FloatConverter -_converters["double"] = DoubleConverter -_converters["const char*"] = CStringConverter +_converters["bool"] = BoolConverter +_converters["char"] = CharConverter +_converters["unsigned char"] = CharConverter +_converters["short int"] = ShortConverter +_converters["short int*"] = ShortPtrConverter +_converters["short int[]"] = ShortArrayConverter +_converters["unsigned short int"] = ShortConverter +_converters["unsigned short int*"] = ShortPtrConverter +_converters["unsigned short int[]"] = ShortArrayConverter +_converters["int"] = LongConverter +_converters["unsigned int"] = LongConverter +_converters["long int"] = LongConverter +_converters["unsigned long int"] = LongConverter +_converters["float"] = FloatConverter +_converters["double"] = DoubleConverter +_converters["const char*"] = CStringConverter Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Thu Sep 2 02:31:09 2010 @@ -98,7 +98,7 @@ # integer arrays import array a = range(self.N) - atypes = [ 'h' ] #, 'H', 'i', 'I', 'l', 'L' ] + atypes = ['h', 'H']#, 'i', 'I', 'l', 'L' ] for j in range(len(atypes)):#names)): b = array.array(atypes[j], a) exec 'c.m_%s_array = b' % names[j] # buffer copies From dan at codespeak.net Thu Sep 2 10:06:59 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Thu, 2 Sep 2010 10:06:59 +0200 (CEST) Subject: [pypy-svn] r76831 - in pypy/branch/micronumpy/pypy: jit/tl module/micronumpy module/micronumpy/test Message-ID: <20100902080659.B8F10282B9C@codespeak.net> Author: dan Date: Thu Sep 2 10:06:58 2010 New Revision: 76831 Modified: pypy/branch/micronumpy/pypy/jit/tl/pypyjit.py pypy/branch/micronumpy/pypy/jit/tl/pypyjit_demo.py pypy/branch/micronumpy/pypy/module/micronumpy/array.py pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py Log: More compliance, broke test_subarray though, fixing soon. 
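Most of the MicroArray changes in this patch revolve around two pieces of bookkeeping: computing row-major (C-order) strides for a shape, and flattening a multi-dimensional index into a single offset into the data by summing index*stride products. The following is a minimal standalone sketch of that arithmetic, not the MicroArray code itself (the helper names are invented for the illustration; in the class, slice_steps additionally scale how far each index advances, which is what makes strided views cheap):

    # Illustrative only: the stride/offset arithmetic behind MicroArray.
    def row_major_strides(shape):
        """Element strides for a C-ordered (row-major) array."""
        strides = [1] * len(shape)
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
        return strides

    def flatten_index(index, strides):
        """Turn a multi-dimensional index into a flat element offset."""
        assert len(index) == len(strides)
        return sum(i * s for i, s in zip(index, strides))

    shape = (2, 3, 4)
    strides = row_major_strides(shape)               # [12, 4, 1]
    assert flatten_index((1, 2, 3), strides) == 23   # the last element
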
Modified: pypy/branch/micronumpy/pypy/jit/tl/pypyjit.py ============================================================================== --- pypy/branch/micronumpy/pypy/jit/tl/pypyjit.py (original) +++ pypy/branch/micronumpy/pypy/jit/tl/pypyjit.py Thu Sep 2 10:06:58 2010 @@ -37,7 +37,8 @@ set_opt_level(config, level='jit') config.objspace.allworkingmodules = False config.objspace.usemodules.pypyjit = True -config.objspace.usemodules.array = True +config.objspace.usemodules.array = False +config.objspace.usemodules.micronumpy = True config.objspace.usemodules._weakref = False config.objspace.usemodules._sre = False set_pypy_opt_level(config, level='jit') Modified: pypy/branch/micronumpy/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/micronumpy/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/micronumpy/pypy/jit/tl/pypyjit_demo.py Thu Sep 2 10:06:58 2010 @@ -1,64 +1,19 @@ -## base = object - -## class Number(base): -## __slots__ = ('val', ) -## def __init__(self, val=0): -## self.val = val - -## def __add__(self, other): -## if not isinstance(other, int): -## other = other.val -## return Number(val=self.val + other) - -## def __cmp__(self, other): -## val = self.val -## if not isinstance(other, int): -## other = other.val -## return cmp(val, other) - -## def __nonzero__(self): -## return bool(self.val) - -## def g(x, inc=2): -## return x + inc - -## def f(n, x, inc): -## while x < n: -## x = g(x, inc=1) -## return x - -## import time -## #t1 = time.time() -## #f(10000000, Number(), 1) -## #t2 = time.time() -## #print t2 - t1 -## t1 = time.time() -## f(10000000, 0, 1) -## t2 = time.time() -## print t2 - t1 - +print "Starting execution..." try: from micronumpy import zeros + import micronumpy as numpy + import micronumpy - size = 128 - dtype = float + from convolve import naive_convolve + from numpybench import generate_kernel, generate_image - for run in range(200): - ar = zeros((size,), dtype=dtype) + image = generate_image(128, 128) + kernel = generate_kernel(5, 5) + + for i in range(100): + result = naive_convolve(image, kernel) - for i in range(size): - ar[i] = dtype(i) * 5 - print i except Exception, e: print "Exception: ", type(e) print e - -## def f(): -## a=7 -## i=0 -## while i<4: -## if i<0: break -## if i<0: break -## i+=1 - -## f() +print "Stopping execution..." Modified: pypy/branch/micronumpy/pypy/module/micronumpy/array.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/array.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/array.py Thu Sep 2 10:06:58 2010 @@ -4,6 +4,8 @@ from pypy.interpreter.gateway import NoneNotWrapped from pypy.interpreter.gateway import interp2app +SQUEEZE_ME = -1 + def stride_row(shape, i): assert i >= 0 stride = 1 @@ -28,33 +30,33 @@ def normalize_slice_starts(slice_starts, shape): for i in range(len(slice_starts)): + #print "slice_start[%d]=%d" % (i, slice_starts[i]) if slice_starts[i] < 0: slice_starts[i] += shape[i] elif slice_starts[i] >= shape[i]: + print "raising" raise IndexError("invalid index") return slice_starts def squeeze_shape(shape): "Simple squeeze." 
+ #return [x for x in shape if x != SQUEEZE_ME] return [x for x in shape if x != 1] -def squeeze_slice(current_shape, starts, shape, step): - current_shape = current_shape[:] +def squeeze(starts, shape, step, strides): + offset = 0 i = 0 stop = len(shape) while i < stop: - if shape[i] == 1: + if shape[i] == SQUEEZE_ME: if i == 0: - offset += starts[i] # FIXME: eh? - offset *= current_shape[i] - else: - step[i-1] += starts[i] # FIXME: eh? - step[i-1] *= current_shape[i] + offset += starts[i] - del current_shape[i] - del starts[i] # XXX: I think this needs to be incorporated... + del starts[i] del shape[i] del step[i] + del strides[i] + stop -= 1 else: i += 1 return offset @@ -64,7 +66,7 @@ try: while shape[prefix] == 1: prefix += 1 except IndexError, e: - prefix = len(shape) # XXX - 1? + prefix = len(shape) return prefix Modified: pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py Thu Sep 2 10:06:58 2010 @@ -15,20 +15,36 @@ from pypy.module.micronumpy.array import infer_shape from pypy.module.micronumpy.array import stride_row, stride_column, size_from_shape from pypy.module.micronumpy.array import normalize_slice_starts -from pypy.module.micronumpy.array import squeeze_slice, squeeze_shape +from pypy.module.micronumpy.array import squeeze_shape +from pypy.module.micronumpy.array import squeeze, SQUEEZE_ME from pypy.module.micronumpy.array import shape_prefix from pypy.rpython.lltypesystem.lltype import cast_ptr_to_int +class FlatIter(Wrappable): + _immutable_fields_ = ['array', 'stop'] + def __init__(self, array): + self.array = array + self.i = 0 + self.stop = size_from_shape(array.shape) + + def descr_iter(self, space): + return space.wrap(self) + descr_iter.unwrap_spec = ['self', ObjSpace] + + def descr_next(self, space): + return self.array.getitem(space, self.i) # FIXME + descr_iter.unwrap_spec = ['self', ObjSpace] + class MicroIter(Wrappable): - _immutable_fields_ = ['step', 'stop', 'ndim'] # XXX: removed array + _immutable_fields_ = ['array', 'step', 'stop', 'ndim'] def __init__(self, array): self.array = array self.i = 0 self.index = array.slice_starts[:] - self.step = array.slice_steps[array.offset] - self.stop = array.shape[array.offset] - self.ndim = len(array.shape[array.offset:]) + self.step = array.slice_steps[array.prefix] + self.stop = array.shape[array.prefix] + self.ndim = len(array.shape) - array.prefix def descr_iter(self, space): return space.wrap(self) @@ -36,25 +52,24 @@ def descr_next(self, space): if self.i < self.stop: + print self.index if self.ndim > 1: ar = MicroArray(self.array.shape, self.array.dtype, parent=self.array, - offset=self.array.offset + 1, + offset=self.array.prefix + 1, strides=self.array.strides, slice_starts=self.index, slice_steps=self.array.slice_steps) - self.i += 1 - self.index[self.array.offset] += self.step - return space.wrap(ar) + next = space.wrap(ar) elif self.ndim == 1: - next = self.array.getitem(space, self.array.flatten_index(self.index)) - self.i += 1 - self.index[self.array.offset] += self.step - return next + next = self.array.getitem(space, self.array.flatten_slice_starts(self.index)) else: raise OperationError(space.w_ValueError, space.wrap("Something is horribly wrong with this array's shape. Has %d dimensions." 
% len(self.array.shape))) + self.i += 1 + self.index[self.array.prefix] += self.step + return next else: raise OperationError(space.w_StopIteration, space.wrap("")) descr_next.unwrap_spec = ['self', ObjSpace] @@ -64,9 +79,13 @@ next = interp2app(MicroIter.descr_next), ) + class MicroArray(BaseNumArray): _immutable_fields_ = ['shape', 'strides', 'offset', 'slice_starts'] # XXX: removed parent - def __init__(self, shape, dtype, order='C', strides=[], parent=None, offset=0, slice_starts=[], slice_steps=[]): + def __init__(self, shape, dtype, + order='C', strides=[], parent=None, + prefix=0, offset=0, + slice_starts=[], slice_steps=[]): assert dtype is not None self.shape = shape @@ -74,21 +93,21 @@ self.parent = parent self.order = order self.offset = offset + self.prefix = prefix self.slice_starts = slice_starts[:] - for i in range(len(shape) - len(slice_starts)): + for i in range(len(slice_starts), len(shape)): self.slice_starts.append(0) self.slice_steps = slice_steps[:] - for i in range(len(shape) - len(slice_steps)): + for i in range(len(slice_steps), len(shape)): self.slice_steps.append(1) size = size_from_shape(shape) self.strides = strides[:] - stridelen = len(self.strides) - for i in range(len(self.shape) - stridelen): - self.strides.append(self.stride(stridelen + i)) + for i in range(len(self.strides), len(shape)): + self.strides.append(self.stride(i)) if size > 0 and parent is None: self.data = dtype.dtype.alloc(size) @@ -98,7 +117,7 @@ self.data = null_data def descr_len(self, space): - return space.wrap(self.shape[self.offset]) + return space.wrap(self.shape[self.prefix]) descr_len.unwrap_spec = ['self', ObjSpace] def getitem(self, space, offset): @@ -110,9 +129,11 @@ raise OperationError(space.w_IndexError, space.wrap("index out of bounds")) - def setitem(self, space, index, w_value): + def setitem(self, space, offset, w_value): + """Helper function. + Sets a value at an offset in the data.""" try: - self.dtype.dtype.w_setitem(space, self.data, index, w_value) + self.dtype.dtype.w_setitem(space, self.data, offset, w_value) except IndexError, e: raise OperationError(space.w_IndexError, space.wrap("index out of bounds")) @@ -122,10 +143,20 @@ Gives offset into subarray, not into data.""" offset = 0 for i in range(len(slice_starts)): + offset += slice_starts[i] * self.strides[i] + #print offset + return offset + + def flatten_slice_starts2(self, slice_starts): + """Computes offset into subarray from all information. + Gives offset into subarray, not into data.""" + offset = 0 + for i in range(len(slice_starts)): offset += (self.slice_steps[i] * slice_starts[i]) * self.strides[i] + print offset return offset - flatten_index = flatten_slice_starts # TODO: migrate to slice_starts for name? + flatten_index = flatten_slice_starts2 # TODO: migrate to slice_starts for name? 
def stride(self, i): if self.order == 'C': @@ -146,10 +177,13 @@ index = space.int_w(space.index(w_index)) # Normalize if index < 0: - index += self.shape[self.offset] + index += self.shape[self.prefix] + elif index > self.shape[self.prefix]: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) # FIXME: message - slice_starts[self.offset] += index * self.slice_steps[self.offset] - shape[self.offset] = 1 + slice_starts[self.prefix] += index * self.slice_steps[self.prefix] + shape[self.prefix] = 1 #SQUEEZE_ME return slice_starts, shape, slice_steps except OperationError, e: if e.match(space, space.w_TypeError): pass @@ -157,9 +191,9 @@ if isinstance(w_index, W_SliceObject): start, stop, step, length = w_index.indices4(space, self.shape[0]) - slice_starts[self.offset] += start * slice_steps[self.offset] - shape[self.offset] = length - slice_steps[self.offset] *= step + slice_starts[self.prefix] += start * slice_steps[self.prefix] + shape[self.prefix] = length + slice_steps[self.prefix] *= step return slice_starts, shape, slice_steps elif space.is_w(w_index, space.w_Ellipsis): return slice_starts, shape, slice_steps @@ -168,13 +202,16 @@ indices = space.fixedview(w_index) indexlen = len(indices) + if indexlen != len(self.shape): # FIXME: shape will often be larger... + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) # FIXME: message for i in range(indexlen): w_index = indices[i] try: index = space.int_w(space.index(w_index)) - slice_starts[self.offset + i] += index - shape[self.offset + i] = 1 + slice_starts[self.prefix + i] += index + shape[self.prefix + i] = 1 #SQUEEZE_ME continue except OperationError, e: @@ -183,9 +220,9 @@ if isinstance(w_index, W_SliceObject): start, stop, step, length = w_index.indices4(space, self.shape[i]) - slice_starts[self.offset + i] += start * slice_steps[self.offset + i] - shape[self.offset + i] = length - slice_steps[self.offset + i] *= step + slice_starts[self.prefix + i] += start * slice_steps[self.prefix + i] + shape[self.prefix + i] = length + slice_steps[self.prefix + i] *= step elif space.is_w(w_index, space.w_Ellipsis): pass # I can't think of anything we need to do else: @@ -212,13 +249,14 @@ if size == 1: return self.getitem(space, - self.flatten_index(slice_starts)) + self.flatten_slice_starts(slice_starts)) else: prefix = shape_prefix(shape) ar = MicroArray(shape, dtype=self.dtype, parent=self, offset=prefix, # XXX: what do we do about shapes that needs to be squeezed out? 
+ strides=self.strides[:], slice_starts=slice_starts, slice_steps=slice_steps) return space.wrap(ar) @@ -372,7 +410,7 @@ return space.wrap(self.dtype) def descr_get_shape(space, self): - return space.newtuple([space.wrap(x) for x in self.shape[self.offset:]]) + return space.newtuple([space.wrap(x) for x in self.shape[self.prefix:]]) def descr_get_array_interface(space, self): w_dict = space.newdict() Modified: pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py Thu Sep 2 10:06:58 2010 @@ -463,6 +463,16 @@ assert shape[0] == 2 assert step[0] == 1 + def test_squeeze(self, space): + from pypy.module.micronumpy.array import squeeze + from pypy.module.micronumpy.microarray import SQUEEZE_ME + slice_starts = [1, 2, 3] + shape = [1, SQUEEZE_ME, 3] + slice_steps = [1, 1, 1] + strides = [1, 2, 3] + + offset = squeeze(slice_starts, shape, slice_steps, strides) + def test_slice_setting(self, space): from pypy.module.micronumpy.array import size_from_shape from pypy.module.micronumpy.microarray import MicroArray From afa at codespeak.net Thu Sep 2 13:14:01 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 2 Sep 2010 13:14:01 +0200 (CEST) Subject: [pypy-svn] r76832 - in pypy/trunk/pypy: module/cpyext rpython/lltypesystem Message-ID: <20100902111401.B0D46282B9C@codespeak.net> Author: afa Date: Thu Sep 2 13:13:58 2010 New Revision: 76832 Modified: pypy/trunk/pypy/module/cpyext/api.py pypy/trunk/pypy/rpython/lltypesystem/lltype.py Log: cpyext needs a *copy* of rffi.CCHARP which is distinct, but equal to rffi.CCHARP. 
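The lltype change this commit depends on is Ptr() interning: constructing Ptr(T) twice normally returns the very same object, unless use_cache=False is passed, which yields a distinct but equal type. A simplified, self-contained sketch of that constructor pattern (plain Python with an invented TO value, not the real lltype code; the cache shown is a WeakValueDictionary, matching the follow-up fix in r76833 below rather than the WeakKeyDictionary first used):

    import weakref

    class Ptr(object):
        _cache = weakref.WeakValueDictionary()   # TO -> interned Ptr

        def __new__(cls, TO, use_cache=True):
            if use_cache:
                try:
                    return cls._cache[TO]        # already interned
                except KeyError:
                    obj = cls._cache[TO] = object.__new__(cls)
            else:
                obj = object.__new__(cls)        # deliberately uncached
            obj.TO = TO
            return obj

        def __eq__(self, other):
            return isinstance(other, Ptr) and self.TO == other.TO

        def __ne__(self, other):
            return not self == other

        def __hash__(self):
            return hash(('Ptr', self.TO))

    a = Ptr('CHARP')
    b = Ptr('CHARP')
    c = Ptr('CHARP', use_cache=False)
    assert a is b and c is not a and c == a      # distinct but equal

The point for cpyext is exactly the last assertion: CONST_STRING must compare equal to rffi.CCHARP while remaining a separate type object.
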
Modified: pypy/trunk/pypy/module/cpyext/api.py ============================================================================== --- pypy/trunk/pypy/module/cpyext/api.py (original) +++ pypy/trunk/pypy/module/cpyext/api.py Thu Sep 2 13:13:58 2010 @@ -62,11 +62,15 @@ VA_LIST_P = rffi.VOIDP # rffi.COpaquePtr('va_list') CONST_STRING = lltype.Ptr(lltype.Array(lltype.Char, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) CONST_WSTRING = lltype.Ptr(lltype.Array(lltype.UniChar, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) assert CONST_STRING is not rffi.CCHARP +assert CONST_STRING == rffi.CCHARP assert CONST_WSTRING is not rffi.CWCHARP +assert CONST_WSTRING == rffi.CWCHARP # FILE* interface FILEP = rffi.COpaquePtr('FILE') Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lltype.py Thu Sep 2 13:13:58 2010 @@ -624,16 +624,19 @@ __name__ = property(lambda self: '%sPtr' % self.TO.__name__) _cache = weakref.WeakKeyDictionary() # cache the Ptrs - def __new__(cls, TO): - try: - obj = Ptr._cache[TO] - except KeyError: - obj = Ptr._cache[TO] = LowLevelType.__new__(cls) - except TypeError: + def __new__(cls, TO, use_cache=True): + if not use_cache: obj = LowLevelType.__new__(cls) + else: + try: + obj = Ptr._cache[TO] + except KeyError: + obj = Ptr._cache[TO] = LowLevelType.__new__(cls) + except TypeError: + obj = LowLevelType.__new__(cls) return obj - def __init__(self, TO): + def __init__(self, TO, use_cache=True): if not isinstance(TO, ContainerType): raise TypeError, ("can only point to a Container type, " "not to %s" % (TO,)) From arigo at codespeak.net Thu Sep 2 14:16:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 14:16:44 +0200 (CEST) Subject: [pypy-svn] r76833 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100902121644.B2F95282B9C@codespeak.net> Author: arigo Date: Thu Sep 2 14:16:42 2010 New Revision: 76833 Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py Log: Fixes to lltype.py: * use WeakValueDictionary, not WeakKeyDictionary, for these two caches. They did not work properly: e.g. after you do p1 = Ptr(Array()); p2 = Ptr(Array()) then p1 is p2, but this single object (let's call it p) has p.TO being the second Array instance, while the _cache still has a reference to the first Array instance -- which goes away as soon as the second Ptr() is built. * move TLS as a local (to shut off some errors occurring at interpreter shut-down). Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lltype.py Thu Sep 2 14:16:42 2010 @@ -37,7 +37,7 @@ return ''%(self.TYPE,) -def saferecursive(func, defl): +def saferecursive(func, defl, TLS=TLS): def safe(*args): try: seeing = TLS.seeing @@ -54,7 +54,7 @@ return safe #safe_equal = saferecursive(operator.eq, True) -def safe_equal(x, y): +def safe_equal(x, y, TLS=TLS): # a specialized version for performance try: seeing = TLS.seeing_eq @@ -97,7 +97,7 @@ raise TypeError return value - def __hash__(self): + def __hash__(self, TLS=TLS): # cannot use saferecursive() -- see test_lltype.test_hash(). # NB. 
the __cached_hash should neither be used nor updated # if we enter with hash_level > 0, because the computed @@ -401,7 +401,7 @@ # behaves more or less like a Struct with fields item0, item1, ... # but also supports __getitem__(), __setitem__(), __len__(). - _cache = weakref.WeakKeyDictionary() # cache the length-1 FixedSizeArrays + _cache = weakref.WeakValueDictionary() # cache the length-1 FixedSizeArrays def __new__(cls, OF, length, **kwds): if length == 1 and not kwds: try: @@ -623,25 +623,23 @@ class Ptr(LowLevelType): __name__ = property(lambda self: '%sPtr' % self.TO.__name__) - _cache = weakref.WeakKeyDictionary() # cache the Ptrs + _cache = weakref.WeakValueDictionary() # cache the Ptrs def __new__(cls, TO, use_cache=True): + if not isinstance(TO, ContainerType): + raise TypeError, ("can only point to a Container type, " + "not to %s" % (TO,)) if not use_cache: obj = LowLevelType.__new__(cls) else: try: - obj = Ptr._cache[TO] + return Ptr._cache[TO] except KeyError: obj = Ptr._cache[TO] = LowLevelType.__new__(cls) except TypeError: obj = LowLevelType.__new__(cls) + obj.TO = TO return obj - def __init__(self, TO, use_cache=True): - if not isinstance(TO, ContainerType): - raise TypeError, ("can only point to a Container type, " - "not to %s" % (TO,)) - self.TO = TO - def _needsgc(self): # XXX deprecated interface return self.TO._gckind not in ('raw', 'prebuilt') From arigo at codespeak.net Thu Sep 2 14:59:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 14:59:33 +0200 (CEST) Subject: [pypy-svn] r76835 - in pypy/branch/no-_immutable_/pypy/rpython/lltypesystem: . test Message-ID: <20100902125933.40606282B9C@codespeak.net> Author: arigo Date: Thu Sep 2 14:59:29 2010 New Revision: 76835 Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lltype.py Log: Create an interface to immutable / immutable_fields. 
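The new _immutable_field() helper is just a two-level lookup in the type's _hints dict: first the 'immutable_fields' accessor (returning the recorded suffix, or True when the entry is the empty string), then the blanket 'immutable' flag, and False otherwise. A self-contained sketch of that lookup, with a dummy FieldListAccessor standing in for the rtyper's real one (the dict literals are invented for the example):

    class FieldListAccessor(object):
        def __init__(self, fields):
            self.fields = fields          # e.g. {'x': ''} or {'x': '[*]'}

    def immutable_field(hints, field):
        """Return False, True, or a suffix such as '[*]'."""
        if 'immutable_fields' in hints:
            try:
                s = hints['immutable_fields'].fields[field]
                return s or True          # '' means plainly immutable
            except KeyError:
                pass
        return hints.get('immutable', False)

    acc = FieldListAccessor({'x': '[*]'})
    assert immutable_field({}, 'x') is False
    assert immutable_field({'immutable': True}, 'x') is True
    assert immutable_field({'immutable_fields': acc}, 'x') == '[*]'
    assert immutable_field({'immutable_fields': acc}, 'y') is False

The tests added in this commit exercise exactly these three outcomes: no hint, the blanket immutable flag, and a per-field '[*]' suffix.
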
Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py Thu Sep 2 14:59:29 2010 @@ -297,6 +297,15 @@ n = 1 return _struct(self, n, initialization='example') + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) + class RttiStruct(Struct): _runtime_type_info = None Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lltype.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lltype.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lltype.py Thu Sep 2 14:59:29 2010 @@ -781,6 +781,28 @@ p = cast_opaque_ptr(llmemory.GCREF, a) assert hash1 == identityhash(p) +def test_immutable_hint(): + S = GcStruct('S', ('x', lltype.Signed)) + assert S._immutable_field('x') == False + # + S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' + + class TestTrackAllocation: def setup_method(self, func): start_tracking_allocations() From afa at codespeak.net Thu Sep 2 15:29:08 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 2 Sep 2010 15:29:08 +0200 (CEST) Subject: [pypy-svn] r76836 - pypy/trunk/pypy/translator/c/gcc Message-ID: <20100902132908.814A836C222@codespeak.net> Author: afa Date: Thu Sep 2 15:29:04 2010 New Revision: 76836 Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Log: A new x86 instruction I'd never seen before (Convert Word to Extended Doubleword) we may ignore it. Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Thu Sep 2 15:29:04 2010 @@ -375,7 +375,7 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'prefetch', + 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', From arigo at codespeak.net Thu Sep 2 16:11:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 16:11:24 +0200 (CEST) Subject: [pypy-svn] r76837 - in pypy/branch/no-_immutable_/pypy/rpython: . 
ootypesystem test Message-ID: <20100902141124.919C7282BE3@codespeak.net> Author: arigo Date: Thu Sep 2 16:11:22 2010 New Revision: 76837 Modified: pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/ootype.py pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/rclass.py pypy/branch/no-_immutable_/pypy/rpython/rclass.py pypy/branch/no-_immutable_/pypy/rpython/test/test_rclass.py Log: Write tests about various combinations of _immutable_, _immutable_fields_, and subclassing. Implement that. Modified: pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/ootype.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/ootype.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/ootype.py Thu Sep 2 16:11:22 2010 @@ -267,6 +267,14 @@ return self._fields_with_default[:] return self._superclass._get_fields_with_default() + self._fields_with_default + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) class SpecializableType(OOType): Modified: pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/rclass.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/rclass.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/ootypesystem/rclass.py Thu Sep 2 16:11:22 2010 @@ -194,6 +194,7 @@ self.lowleveltype._hints.update(hints) if self.classdef is None: + self.fields = {} self.allfields = {} self.allmethods = {} self.allclassattributes = {} @@ -210,6 +211,7 @@ allclassattributes = {} fields = {} + nonmangledfields = [] fielddefaults = {} if llfields: @@ -224,6 +226,7 @@ allfields[mangled] = repr oot = repr.lowleveltype fields[mangled] = oot + nonmangledfields.append(name) try: value = self.classdef.classdesc.read_attribute(name) fielddefaults[mangled] = repr.convert_desc_or_const(value) @@ -294,6 +297,7 @@ if not attrdef.s_value.is_constant(): classattributes[mangled] = attrdef.s_value, value + self.fields = nonmangledfields self.allfields = allfields self.allmethods = allmethods self.allclassattributes = allclassattributes Modified: pypy/branch/no-_immutable_/pypy/rpython/rclass.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/rclass.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/rclass.py Thu Sep 2 16:11:22 2010 @@ -19,8 +19,8 @@ return True class ImmutableConflictError(Exception): - """Raised when an attribute 'x' is found on some parent class, - but defined in a subclass to be in _immutable_fields_.""" + """Raised when the _immutable_ or _immutable_fields_ hints are + not consistent across a class hierarchy.""" def getclassrepr(rtyper, classdef): @@ -157,10 +157,9 @@ pass def _check_for_immutable_hints(self, hints): - if '_immutable_' in self.classdef.classdesc.classdict: - raise TyperError( - "%r: the _immutable_ hint is not supported any more.\n" - "Use _immutable_fields_ instead." 
% (self,)) + if self.classdef.classdesc.lookup('_immutable_') is not None: + hints = hints.copy() + hints['immutable'] = True self.immutable_field_list = [] # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() @@ -187,39 +186,29 @@ return 'InstanceR %s' % (clsname,) def _setup_repr_final(self): + self._setup_immutable_field_list() + self._check_for_immutable_conflicts() + + def _setup_immutable_field_list(self): hints = self.object_type._hints if "immutable_fields" in hints: accessor = hints["immutable_fields"] - immutable_fields = {} - rbase = self - while rbase.classdef is not None: - immutable_fields.update( - dict.fromkeys(rbase.immutable_field_list, rbase)) - rbase = rbase.rbase - self._parse_field_list(immutable_fields, accessor) + if not hasattr(accessor, 'fields'): + immutable_fields = [] + rbase = self + while rbase.classdef is not None: + immutable_fields += rbase.immutable_field_list + rbase = rbase.rbase + self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} - for name, rbase in fields.items(): + for name in fields: if name.endswith('[*]'): name = name[:-3] suffix = '[*]' else: suffix = '' - # - # check that the field is not higher up the class hierarchy - # than where it was originally defined in _immutable_fields_ - if rbase.rbase.classdef is not None: - try: - rbase.rbase._get_field(name) - except KeyError: - pass - else: - raise ImmutableConflictError( - "the field %r is declared in _immutable_fields_ in " - "class %r, but actually ends up in parent class" - % (name, rbase)) - # try: mangled_name, r = self._get_field(name) except KeyError: @@ -228,6 +217,36 @@ accessor.initialize(self.object_type, with_suffix) return with_suffix + def _check_for_immutable_conflicts(self): + # check for conflicts, i.e. 
a field that is defined normally as + # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void + is_self_immutable = "immutable" in self.object_type._hints + base = self + while base.classdef is not None: + base = base.rbase + for fieldname in base.fields: + try: + mangled, r = base._get_field(fieldname) + except KeyError: + continue + if r.lowleveltype == Void: + continue + base._setup_immutable_field_list() + if base.object_type._immutable_field(mangled): + continue + # 'fieldname' is a mutable, non-Void field in the parent + if is_self_immutable: + raise ImmutableConflictError( + "class %r has _immutable_=True, but parent class %r " + "defines (at least) the mutable field %r" % ( + self, base, fieldname)) + if fieldname in self.immutable_field_list: + raise ImmutableConflictError( + "field %r is defined mutable in class %r, but " + "listed in _immutable_fields_ in subclass %r" % ( + fieldname, base, self)) + def new_instance(self, llops, classcallhop=None): raise NotImplementedError Modified: pypy/branch/no-_immutable_/pypy/rpython/test/test_rclass.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/test/test_rclass.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/test/test_rclass.py Thu Sep 2 16:11:22 2010 @@ -709,7 +709,7 @@ def test_immutable(self): class I(object): - _immutable_fields_ = ["v"] + _immutable_ = True def __init__(self, v): self.v = v @@ -796,7 +796,7 @@ assert accessor.fields == {"inst_y" : ""} or \ accessor.fields == {"oy" : ""} # for ootype - def test_immutable_forbidden_inheritance(self): + def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError class A(object): pass @@ -805,9 +805,84 @@ def f(): A().v = 123 B() # crash: class B says 'v' is immutable, - # but it is defined on parent class I + # but it is defined on parent class A py.test.raises(ImmutableConflictError, self.gengraph, f, []) + def test_immutable_forbidden_inheritance_2(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B() # crash: class B has _immutable_ = True + # but class A defines 'v' to be mutable + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_ok_inheritance_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['v'] + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B().w = 456 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + try: + A_TYPE = B_TYPE.super + except AttributeError: + A_TYPE = B_TYPE._superclass # for ootype + accessor = A_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype + + def test_immutable_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_ = True + class B(A): + pass + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] # inherited from A + + def test_immutable_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, 
[]) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + + def test_immutable_subclass_void(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def myfunc(): + pass + def f(): + A().f = myfunc # it's ok to add Void attributes to A + B().v = 123 # even though only B is declared _immutable_ + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + class TestLLtype(BaseTestRclass, LLRtypeMixin): From arigo at codespeak.net Thu Sep 2 16:12:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 16:12:06 +0200 (CEST) Subject: [pypy-svn] r76838 - pypy/branch/no-_immutable_/pypy/objspace/std Message-ID: <20100902141206.CDBF0282BDC@codespeak.net> Author: arigo Date: Thu Sep 2 16:12:05 2010 New Revision: 76838 Modified: pypy/branch/no-_immutable_/pypy/objspace/std/tupleobject.py Log: A more precise hint. Modified: pypy/branch/no-_immutable_/pypy/objspace/std/tupleobject.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/objspace/std/tupleobject.py (original) +++ pypy/branch/no-_immutable_/pypy/objspace/std/tupleobject.py Thu Sep 2 16:12:05 2010 @@ -10,7 +10,7 @@ class W_TupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef - _immutable_ = True + _immutable_fields_ = ['wrappeditems[*]'] def __init__(w_self, wrappeditems): make_sure_not_resized(wrappeditems) From arigo at codespeak.net Thu Sep 2 16:23:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 16:23:33 +0200 (CEST) Subject: [pypy-svn] r76839 - in pypy/branch/no-_immutable_/pypy: jit/codewriter rpython/lltypesystem Message-ID: <20100902142333.AB231282B9C@codespeak.net> Author: arigo Date: Thu Sep 2 16:23:32 2010 New Revision: 76839 Modified: pypy/branch/no-_immutable_/pypy/jit/codewriter/jtransform.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/opimpl.py Log: A few fixes. Untested so far. 
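The hints being unified by the new _immutable_field() helper are plain class attributes in RPython source. A minimal sketch with invented class names (the GcStruct comment mirrors test_immutable_hint above):

    class Point(object):
        _immutable_ = True            # every field of a Point is immutable
        def __init__(self, x, y):
            self.x = x
            self.y = y

    class Polygon(object):
        # only the listed fields are immutable; the '[*]' suffix additionally
        # declares the items of the stored list immutable, as done for
        # W_TupleObject.wrappeditems in r76838 above
        _immutable_fields_ = ['points[*]']
        def __init__(self, points):
            self.points = points

    # After rtyping, the low-level type answers a single query:
    #   S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True})
    #   S._immutable_field('x')  ->  True
    #   with the '[*]' form, _immutable_field() returns '[*]' instead of True

The diff that follows routes the consumers of these hints (jtransform, lloperation, opimpl) through _immutable_field() instead of having each one inspect the 'immutable' and 'immutable_fields' hints separately.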
Modified: pypy/branch/no-_immutable_/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/no-_immutable_/pypy/jit/codewriter/jtransform.py Thu Sep 2 16:23:32 2010 @@ -511,14 +511,11 @@ arraydescr) return [] # check for deepfrozen structures that force constant-folding - hints = v_inst.concretetype.TO._hints - accessor = hints.get("immutable_fields") - if accessor and c_fieldname.value in accessor.fields: + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: pure = '_pure' - if accessor.fields[c_fieldname.value] == "[*]": + if immut == "[*]": self.immutable_arrays[op.result] = True - elif hints.get('immutable'): - pure = '_pure' else: pure = '' argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py Thu Sep 2 16:23:32 2010 @@ -85,16 +85,17 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return (self.canfold or # canfold => pure operation - self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable - (self in (llop.getfield, llop.getarrayitem) and - args_v[0].concretetype.TO._hints.get('immutable')) or - (self is llop.getfield and # reading from immutable_field - 'immutable_fields' in args_v[0].concretetype.TO._hints and - args_v[1].value in args_v[0].concretetype.TO - ._hints['immutable_fields'].fields)) - # XXX: what about ootype immutable arrays? + return ( + self.canfold or # canfold => pure operation + self is llop.debug_assert or # debug_assert is pure enough + # reading from immutable (lltype) + (self in (llop.getfield, llop.getarrayitem) and + isinstance(args_v[1], Constant) and + args_v[0].concretetype.TO._immutable_field(args_v[1].value)) or + # reading from immutable (ootype) (xxx what about arrays?) + (self is llop.oogetfield and + isinstance(args_v[1], Constant) and + args_v[0].concretetype._immutable_field(args_v[1].value))) def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py Thu Sep 2 16:23:32 2010 @@ -400,6 +400,9 @@ def _container_example(self): return _array(self, 1, initialization='example') + def _immutable_field(self, index): + return self._hints.get('immutable', False) + class GcArray(Array): _gckind = 'gc' def _inline_is_varsize(self, last): Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/opimpl.py Thu Sep 2 16:23:32 2010 @@ -150,12 +150,7 @@ # we can constant-fold this if the innermost structure from which we # read the final field is immutable. 
T = lltype.typeOf(innermostcontainer).TO - if T._hints.get('immutable'): - pass - elif ('immutable_fields' in T._hints and - offsets[-1] in T._hints['immutable_fields'].fields): - pass - else: + if not T._immutable_field(offsets[-1]): raise TypeError("cannot fold getinteriorfield on mutable struct") assert not isinstance(ob, lltype._interior_ptr) return ob @@ -418,19 +413,15 @@ def op_getfield(p, name): checkptr(p) TYPE = lltype.typeOf(p).TO - if TYPE._hints.get('immutable'): - pass - elif ('immutable_fields' in TYPE._hints and - name in TYPE._hints['immutable_fields'].fields): - pass - else: + if not TYPE._immutable_field(name): raise TypeError("cannot fold getfield on mutable struct") return getattr(p, name) def op_getarrayitem(p, index): checkptr(p) - if not lltype.typeOf(p).TO._hints.get('immutable'): - raise TypeError("cannot fold getfield on mutable array") + ARRAY = lltype.typeOf(p).TO + if not ARRAY._immutable_field(index): + raise TypeError("cannot fold getarrayitem on mutable array") return p[index] def _normalize(x): From arigo at codespeak.net Thu Sep 2 16:37:17 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 16:37:17 +0200 (CEST) Subject: [pypy-svn] r76840 - in pypy/branch/no-_immutable_/pypy/rpython: . lltypesystem lltypesystem/test Message-ID: <20100902143717.03730282B9C@codespeak.net> Author: arigo Date: Thu Sep 2 16:37:16 2010 New Revision: 76840 Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lloperation.py pypy/branch/no-_immutable_/pypy/rpython/rclass.py Log: Fixes. Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lloperation.py Thu Sep 2 16:37:16 2010 @@ -85,17 +85,20 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return ( - self.canfold or # canfold => pure operation - self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable (lltype) - (self in (llop.getfield, llop.getarrayitem) and - isinstance(args_v[1], Constant) and - args_v[0].concretetype.TO._immutable_field(args_v[1].value)) or - # reading from immutable (ootype) (xxx what about arrays?) - (self is llop.oogetfield and - isinstance(args_v[1], Constant) and - args_v[0].concretetype._immutable_field(args_v[1].value))) + if self.canfold: # canfold => pure operation + return True + if self is llop.debug_assert: # debug_assert is pure enough + return True + # reading from immutable (lltype) + if self is llop.getfield or self is llop.getarrayitem: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype.TO._immutable_field(field) + # reading from immutable (ootype) (xxx what about arrays?) 
+ if self is llop.oogetfield: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype._immutable_field(field) + # default + return False def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/lltype.py Thu Sep 2 16:37:16 2010 @@ -400,7 +400,7 @@ def _container_example(self): return _array(self, 1, initialization='example') - def _immutable_field(self, index): + def _immutable_field(self, index=None): return self._hints.get('immutable', False) class GcArray(Array): Modified: pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lloperation.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lloperation.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/lltypesystem/test/test_lloperation.py Thu Sep 2 16:37:16 2010 @@ -88,7 +88,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) v_s3 = Variable() v_s3.concretetype = lltype.Ptr(S3) assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) @@ -103,7 +103,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') Modified: pypy/branch/no-_immutable_/pypy/rpython/rclass.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/rclass.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/rclass.py Thu Sep 2 16:37:16 2010 @@ -9,6 +9,7 @@ class FieldListAccessor(object): def initialize(self, TYPE, fields): + assert type(fields) is dict self.TYPE = TYPE self.fields = fields From afa at codespeak.net Thu Sep 2 18:14:35 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 2 Sep 2010 18:14:35 +0200 (CEST) Subject: [pypy-svn] r76841 - in pypy/trunk/pypy: bin interpreter Message-ID: <20100902161435.922B7282BDC@codespeak.net> Author: afa Date: Thu Sep 2 18:14:33 2010 New Revision: 76841 Modified: pypy/trunk/pypy/bin/py.py pypy/trunk/pypy/interpreter/baseobjspace.py Log: issue554 resolved: On exit, the interpreter should wait for non-daemon threads. 
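At the user level the new behaviour matches CPython; a sketch of what a script observes once the interpreter waits on threading._shutdown() (the actual hook is in the baseobjspace.py diff below):

    import threading, time

    def worker():
        time.sleep(1.0)
        print "worker finished"

    t = threading.Thread(target=worker)   # non-daemon by default
    t.start()
    # When the main program falls off the end, the interpreter now calls
    # threading._shutdown(), which blocks until all non-daemon threading
    # threads such as t have completed, instead of exiting immediately.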
Tested on top of bin/py.py Modified: pypy/trunk/pypy/bin/py.py ============================================================================== --- pypy/trunk/pypy/bin/py.py (original) +++ pypy/trunk/pypy/bin/py.py Thu Sep 2 18:14:33 2010 @@ -76,6 +76,8 @@ config.objspace.suggest(allworkingmodules=False) if config.objspace.allworkingmodules: pypyoption.enable_allworkingmodules(config) + if config.objspace.usemodules.thread: + config.translation.thread = True # create the object space Modified: pypy/trunk/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/trunk/pypy/interpreter/baseobjspace.py (original) +++ pypy/trunk/pypy/interpreter/baseobjspace.py Thu Sep 2 18:14:33 2010 @@ -288,6 +288,7 @@ self.timer.stop("startup " + modname) def finish(self): + self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: self.call_function(w_exitfunc) @@ -305,6 +306,23 @@ for s in self.FrameClass._space_op_types: print s + def wait_for_thread_shutdown(self): + """Wait until threading._shutdown() completes, provided the threading + module was imported in the first place. The shutdown routine will + wait until all non-daemon 'threading' threads have completed.""" + if not self.config.translation.thread: + return + + w_modules = self.sys.get('modules') + w_mod = self.finditem_str(w_modules, 'threading') + if w_mod is None: + return + + try: + self.call_method(w_mod, "_shutdown") + except OperationError, e: + e.write_unraisable(self, "threading._shutdown()") + def reportbytecodecounts(self): os.write(2, "Starting bytecode report.\n") fd = os.open('bytecode.txt', os.O_CREAT|os.O_WRONLY|os.O_TRUNC, 0644) From arigo at codespeak.net Thu Sep 2 19:43:43 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 19:43:43 +0200 (CEST) Subject: [pypy-svn] r76842 - pypy/trunk/pypy/objspace/std Message-ID: <20100902174343.190F6282BDC@codespeak.net> Author: arigo Date: Thu Sep 2 19:43:33 2010 New Revision: 76842 Modified: pypy/trunk/pypy/objspace/std/objspace.py Log: (cfbolz, arigo) Essential move of an empty line. 
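The diff below is in the getattributeshortcut fast path of space.getattr(); for context, a rough sketch of what the shortcut is about (illustrative classes only, not PyPy code):

    class Plain(object):
        pass

    class Tricky(object):
        def __getattribute__(self, name):
            return 42

    # For Plain() instances the type does not override __getattribute__, so
    # getattribute_if_not_from_object() returns None and space.getattr() may
    # take the shortcut, skipping the full DescrOperation.getattr machinery.
    # For Tricky() instances it returns the overriding descriptor, which must
    # be called instead, so the shortcut falls back.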
Modified: pypy/trunk/pypy/objspace/std/objspace.py ============================================================================== --- pypy/trunk/pypy/objspace/std/objspace.py (original) +++ pypy/trunk/pypy/objspace/std/objspace.py Thu Sep 2 19:43:33 2010 @@ -405,8 +405,8 @@ def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: return DescrOperation.getattr(self, w_obj, w_name) - # an optional shortcut for performance + w_type = self.type(w_obj) w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: From hakanardo at codespeak.net Thu Sep 2 19:53:25 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 2 Sep 2010 19:53:25 +0200 (CEST) Subject: [pypy-svn] r76843 - in pypy/branch/jit-bounds/pypy/jit/metainterp: optimizeopt test Message-ID: <20100902175325.52941282BDC@codespeak.net> Author: hakanardo Date: Thu Sep 2 19:53:23 2010 New Revision: 76843 Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intutils.py Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py Log: separated intbound optimisations Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 2 19:53:23 2010 @@ -1,5 +1,6 @@ from optimizer import Optimizer -from rewrite import Rewrite +from rewrite import OptRewrite +from intbounds import OptIntBounds def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -7,7 +8,9 @@ must be applicable to the loop; you will probably get an AssertionError if not. 
""" - optimizations = (Rewrite(),) + optimizations = [OptIntBounds(), + OptRewrite(), + ] optimizer = Optimizer(metainterp_sd, loop, optimizations) if virtuals: optimizer.setup_virtuals_and_constants() Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 2 19:53:23 2010 @@ -1,7 +1,303 @@ -from optimizer import Optimization +from optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.optimizeutil import _findall +from intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.history import Const, ConstInt +from pypy.jit.metainterp.resoperation import rop, ResOperation -class IntBounds(Optimization): +class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by the guards and remove redundant guards""" - # FIXME: Move here + + def propagate_forward(self, op): + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.emit_operation(op) + + def propagate_bounds_backward(self, box): + # FIXME: This takes care of the instruction where box is the reuslt + # but the bounds produced by all instructions where box is + # an argument might also be tighten + v = self.getvalue(box) + b = v.intbound + if b.has_lower and b.has_upper and b.lower == b.upper: + v.make_constant(ConstInt(b.lower)) + + try: + op = self.optimizer.producer[box] + except KeyError: + return + opnum = op.opnum + for value, func in propagate_bounds_ops: + if opnum == value: + func(self, op) + break + + def optimize_GUARD_TRUE(self, op): + self.emit_operation(op) + self.propagate_bounds_backward(op.args[0]) + + optimize_GUARD_FALSE = optimize_GUARD_TRUE + optimize_GUARD_VALUE = optimize_GUARD_TRUE + + def optimize_INT_AND(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + self.emit_operation(op) + + r = self.getvalue(op.result) + if v2.is_constant(): + val = v2.box.getint() + if val >= 0: + r.intbound.intersect(IntBound(0,val)) + elif v1.is_constant(): + val = v1.box.getint() + if val >= 0: + r.intbound.intersect(IntBound(0,val)) + + def optimize_INT_SUB(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) + + def optimize_INT_ADD(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) + + def optimize_INT_MUL(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) + + def optimize_INT_ADD_OVF(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + resbound = v1.intbound.add_bound(v2.intbound) + if resbound.has_lower and resbound.has_upper and \ + self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + # Transform into INT_ADD and remove guard + op.opnum = rop.INT_ADD + self.skip_nextop() + self.optimize_INT_ADD(op) + else: + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(resbound) + + def optimize_INT_SUB_OVF(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + 
resbound = v1.intbound.sub_bound(v2.intbound) + if resbound.has_lower and resbound.has_upper and \ + self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + # Transform into INT_SUB and remove guard + op.opnum = rop.INT_SUB + self.skip_nextop() + self.optimize_INT_SUB(op) + else: + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(resbound) + + def optimize_INT_MUL_OVF(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + resbound = v1.intbound.mul_bound(v2.intbound) + if resbound.has_lower and resbound.has_upper and \ + self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + # Transform into INT_MUL and remove guard + op.opnum = rop.INT_MUL + self.skip_nextop() + self.optimize_INT_MUL(op) + else: + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(resbound) + + def optimize_INT_LT(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_lt(v2.intbound): + self.make_constant_int(op.result, 1) + elif v1.intbound.known_ge(v2.intbound): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_GT(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_gt(v2.intbound): + self.make_constant_int(op.result, 1) + elif v1.intbound.known_le(v2.intbound): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_LE(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_le(v2.intbound): + self.make_constant_int(op.result, 1) + elif v1.intbound.known_gt(v2.intbound): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_GE(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_ge(v2.intbound): + self.make_constant_int(op.result, 1) + elif v1.intbound.known_lt(v2.intbound): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_EQ(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_gt(v2.intbound): + self.make_constant_int(op.result, 0) + elif v1.intbound.known_lt(v2.intbound): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_NE(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.known_gt(v2.intbound): + self.make_constant_int(op.result, 1) + elif v1.intbound.known_lt(v2.intbound): + self.make_constant_int(op.result, 1) + else: + self.emit_operation(op) + + def make_int_lt(self, args): + v1 = self.getvalue(args[0]) + v2 = self.getvalue(args[1]) + if v1.intbound.make_lt(v2.intbound): + self.propagate_bounds_backward(args[0]) + if v2.intbound.make_gt(v1.intbound): + self.propagate_bounds_backward(args[1]) + + + def make_int_le(self, args): + v1 = self.getvalue(args[0]) + v2 = self.getvalue(args[1]) + if v1.intbound.make_le(v2.intbound): + self.propagate_bounds_backward(args[0]) + if v2.intbound.make_ge(v1.intbound): + self.propagate_bounds_backward(args[1]) + + def make_int_gt(self, args): + self.make_int_lt([args[1], args[0]]) + + def make_int_ge(self, args): + self.make_int_le([args[1], args[0]]) + + def propagate_bounds_INT_LT(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + if r.box.same_constant(CONST_1): + self.make_int_lt(op.args) + else: + self.make_int_ge(op.args) + + def propagate_bounds_INT_GT(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + 
if r.box.same_constant(CONST_1): + self.make_int_gt(op.args) + else: + self.make_int_le(op.args) + + def propagate_bounds_INT_LE(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + if r.box.same_constant(CONST_1): + self.make_int_le(op.args) + else: + self.make_int_gt(op.args) + + def propagate_bounds_INT_GE(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + if r.box.same_constant(CONST_1): + self.make_int_ge(op.args) + else: + self.make_int_lt(op.args) + + def propagate_bounds_INT_EQ(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + if r.box.same_constant(CONST_1): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.intersect(v2.intbound): + self.propagate_bounds_backward(op.args[0]) + if v2.intbound.intersect(v1.intbound): + self.propagate_bounds_backward(op.args[1]) + + def propagate_bounds_INT_NE(self, op): + r = self.getvalue(op.result) + if r.is_constant(): + if r.box.same_constant(CONST_0): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.intbound.intersect(v2.intbound): + self.propagate_bounds_backward(op.args[0]) + if v2.intbound.intersect(v1.intbound): + self.propagate_bounds_backward(op.args[1]) + + def propagate_bounds_INT_ADD(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + r = self.getvalue(op.result) + b = r.intbound.sub_bound(v2.intbound) + if v1.intbound.intersect(b): + self.propagate_bounds_backward(op.args[0]) + b = r.intbound.sub_bound(v1.intbound) + if v2.intbound.intersect(b): + self.propagate_bounds_backward(op.args[1]) + + def propagate_bounds_INT_SUB(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + r = self.getvalue(op.result) + b = r.intbound.add_bound(v2.intbound) + if v1.intbound.intersect(b): + self.propagate_bounds_backward(op.args[0]) + b = r.intbound.sub_bound(v1.intbound).mul(-1) + if v2.intbound.intersect(b): + self.propagate_bounds_backward(op.args[1]) + + def propagate_bounds_INT_MUL(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + r = self.getvalue(op.result) + b = r.intbound.div_bound(v2.intbound) + if v1.intbound.intersect(b): + self.propagate_bounds_backward(op.args[0]) + b = r.intbound.div_bound(v1.intbound) + if v2.intbound.intersect(b): + self.propagate_bounds_backward(op.args[1]) + + propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD + propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB + propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL + +optimize_ops = _findall(OptIntBounds, 'optimize_') +propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_') Added: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intutils.py ============================================================================== --- (empty file) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/intutils.py Thu Sep 2 19:53:23 2010 @@ -0,0 +1,207 @@ +from pypy.rlib.rarithmetic import ovfcheck + +class IntBound(object): + _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') + + def __init__(self, lower, upper): + self.has_upper = True + self.has_lower = True + self.upper = upper + self.lower = lower + + # Returns True if the bound was updated + def make_le(self, other): + if other.has_upper: + if not self.has_upper or other.upper < self.upper: + self.has_upper = True + self.upper = other.upper + return True + return False + + def make_lt(self, other): + return self.make_le(other.add(-1)) + + def make_ge(self, other): + if other.has_lower: + if not 
self.has_lower or other.lower > self.lower: + self.has_lower = True + self.lower = other.lower + return True + return False + + def make_gt(self, other): + return self.make_ge(other.add(1)) + + def make_constant(self, value): + self.has_lower = True + self.has_upper = True + self.lower = value + self.upper = value + + def make_unbounded(self): + self.has_lower = False + self.has_upper = False + + def known_lt(self, other): + if self.has_upper and other.has_lower and self.upper < other.lower: + return True + return False + + def known_le(self, other): + if self.has_upper and other.has_lower and self.upper <= other.lower: + return True + return False + + def known_gt(self, other): + return other.known_lt(self) + + def known_ge(self, other): + return other.known_le(self) + + def intersect(self, other): + r = False + + if other.has_lower: + if other.lower > self.lower or not self.has_lower: + self.lower = other.lower + self.has_lower = True + r = True + + if other.has_upper: + if other.upper < self.upper or not self.has_upper: + self.upper = other.upper + self.has_upper = True + r = True + + return r + + def add(self, offset): + res = self.copy() + try: + res.lower = ovfcheck(res.lower + offset) + except OverflowError: + res.has_lower = False + try: + res.upper = ovfcheck(res.upper + offset) + except OverflowError: + res.has_upper = False + return res + + def mul(self, value): + return self.mul_bound(IntBound(value, value)) + + def add_bound(self, other): + res = self.copy() + if other.has_upper: + try: + res.upper = ovfcheck(res.upper + other.upper) + except OverflowError: + res.has_upper = False + else: + res.has_upper = False + if other.has_lower: + try: + res.lower = ovfcheck(res.lower + other.lower) + except OverflowError: + res.has_lower = False + else: + res.has_lower = False + return res + + def sub_bound(self, other): + res = self.copy() + if other.has_lower: + try: + res.upper = ovfcheck(res.upper - other.lower) + except OverflowError: + res.has_upper = False + else: + res.has_upper = False + if other.has_upper: + try: + res.lower = ovfcheck(res.lower - other.upper) + except OverflowError: + res.has_lower = False + else: + res.has_lower = False + return res + + def mul_bound(self, other): + if self.has_upper and self.has_lower and \ + other.has_upper and other.has_lower: + try: + vals = (ovfcheck(self.upper * other.upper), + ovfcheck(self.upper * other.lower), + ovfcheck(self.lower * other.upper), + ovfcheck(self.lower * other.lower)) + return IntBound(min4(vals), max4(vals)) + except OverflowError: + return IntUnbounded() + else: + return IntUnbounded() + + def div_bound(self, other): + if self.has_upper and self.has_lower and \ + other.has_upper and other.has_lower and \ + not other.contains(0): + try: + vals = (ovfcheck(self.upper / other.upper), + ovfcheck(self.upper / other.lower), + ovfcheck(self.lower / other.upper), + ovfcheck(self.lower / other.lower)) + return IntBound(min4(vals), max4(vals)) + except OverflowError: + return IntUnbounded() + else: + return IntUnbounded() + + def contains(self, val): + if self.has_lower and val < self.lower: + return False + if self.has_upper and val > self.upper: + return False + return True + + def __repr__(self): + if self.has_lower: + l = '%4d' % self.lower + else: + l = '-Inf' + if self.has_upper: + u = '%3d' % self.upper + else: + u = 'Inf' + return '%s <= x <= %s' % (l, u) + + def copy(self): + res = IntBound(self.lower, self.upper) + res.has_lower = self.has_lower + res.has_upper = self.has_upper + return res + +class 
IntUpperBound(IntBound): + def __init__(self, upper): + self.has_upper = True + self.has_lower = False + self.upper = upper + self.lower = 0 + +class IntLowerBound(IntBound): + def __init__(self, lower): + self.has_upper = False + self.has_lower = True + self.upper = 0 + self.lower = lower + +class IntUnbounded(IntBound): + def __init__(self): + self.has_upper = False + self.has_lower = False + self.upper = 0 + self.lower = 0 + +def min4(t): + return min(min(t[0], t[1]), min(t[2], t[3])) + +def max4(t): + return max(max(t[0], t[1]), max(t[2], t[3])) Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 2 19:53:23 2010 @@ -17,209 +17,17 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int -from pypy.rlib.rarithmetic import ovfcheck - -import sys -MAXINT = sys.maxint -MININT = -sys.maxint - 1 - -class IntBound(object): - _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') - - def __init__(self, lower, upper): - self.has_upper = True - self.has_lower = True - self.upper = upper - self.lower = lower - - # Returns True if the bound was updated - def make_le(self, other): - if other.has_upper: - if not self.has_upper or other.upper < self.upper: - self.has_upper = True - self.upper = other.upper - return True - return False - - def make_lt(self, other): - return self.make_le(other.add(-1)) - - def make_ge(self, other): - if other.has_lower: - if not self.has_lower or other.lower > self.lower: - self.has_lower = True - self.lower = other.lower - return True - return False - - def make_gt(self, other): - return self.make_ge(other.add(1)) - - def known_lt(self, other): - if self.has_upper and other.has_lower and self.upper < other.lower: - return True - return False - - def known_le(self, other): - if self.has_upper and other.has_lower and self.upper <= other.lower: - return True - return False - - def known_gt(self, other): - return other.known_lt(self) - - def known_ge(self, other): - return other.known_le(self) - - def intersect(self, other): - r = False - - if other.has_lower: - if other.lower > self.lower or not self.has_lower: - self.lower = other.lower - self.has_lower = True - r = True - - if other.has_upper: - if other.upper < self.upper or not self.has_upper: - self.upper = other.upper - self.has_upper = True - r = True - - return r - - def add(self, offset): - res = self.copy() - try: - res.lower = ovfcheck(res.lower + offset) - except OverflowError: - res.has_lower = False - try: - res.upper = ovfcheck(res.upper + offset) - except OverflowError: - res.has_upper = False - return res - - def mul(self, value): - return self.mul_bound(IntBound(value, value)) - - def add_bound(self, other): - res = self.copy() - if other.has_upper: - try: - res.upper = ovfcheck(res.upper + other.upper) - except OverflowError: - res.has_upper = False - else: - res.has_upper = False - if other.has_lower: - try: - res.lower = ovfcheck(res.lower + other.lower) - except OverflowError: - res.has_lower = False - else: - res.has_lower = False - return res - - def sub_bound(self, other): - res = self.copy() - if other.has_lower: - try: - res.upper = ovfcheck(res.upper - other.lower) - except OverflowError: - res.has_upper = False - 
else: - res.has_upper = False - if other.has_upper: - try: - res.lower = ovfcheck(res.lower - other.upper) - except OverflowError: - res.has_lower = False - else: - res.has_lower = False - return res - - def mul_bound(self, other): - if self.has_upper and self.has_lower and \ - other.has_upper and other.has_lower: - try: - vals = (ovfcheck(self.upper * other.upper), - ovfcheck(self.upper * other.lower), - ovfcheck(self.lower * other.upper), - ovfcheck(self.lower * other.lower)) - return IntBound(min4(vals), max4(vals)) - except OverflowError: - return IntUnbounded() - else: - return IntUnbounded() - - def div_bound(self, other): - if self.has_upper and self.has_lower and \ - other.has_upper and other.has_lower and \ - not other.contains(0): - try: - vals = (ovfcheck(self.upper / other.upper), - ovfcheck(self.upper / other.lower), - ovfcheck(self.lower / other.upper), - ovfcheck(self.lower / other.lower)) - return IntBound(min4(vals), max4(vals)) - except OverflowError: - return IntUnbounded() - else: - return IntUnbounded() - - def contains(self, val): - if self.has_lower and val < self.lower: - return False - if self.has_upper and val > self.upper: - return False - return True - - def __repr__(self): - if self.has_lower: - l = '%4d' % self.lower - else: - l = '-Inf' - if self.has_upper: - u = '%3d' % self.upper - else: - u = 'Inf' - return '%s <= x <= %s' % (l, u) - - def copy(self): - res = IntBound(self.lower, self.upper) - res.has_lower = self.has_lower - res.has_upper = self.has_upper - return res - - - -class IntUpperBound(IntBound): - def __init__(self, upper): - self.has_upper = True - self.has_lower = False - self.upper = upper - self.lower = 0 - -class IntLowerBound(IntBound): - def __init__(self, lower): - self.has_upper = False - self.has_lower = True - self.upper = 0 - self.lower = lower - -class IntUnbounded(IntBound): - def __init__(self): - self.has_upper = False - self.has_lower = False - self.upper = 0 - self.lower = 0 +from intutils import IntBound, IntUnbounded LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays LEVEL_CONSTANT = '\x03' +import sys +MAXINT = sys.maxint +MININT = -sys.maxint - 1 + class OptValue(object): _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -263,7 +71,8 @@ self.box = constbox self.level = LEVEL_CONSTANT try: - self.intbound = IntBound(self.box.getint(),self. 
box.getint()) + val = self.box.getint() + self.intbound = IntBound(val, val) except NotImplementedError: self.intbound = IntUnbounded() @@ -580,7 +389,7 @@ class Optimization(object): def propagate_forward(self, op): - raise NotImplemented + raise NotImplementedError def emit_operation(self, op): self.next_optimization.propagate_forward(op) @@ -591,13 +400,25 @@ def make_constant(self, box, constbox): return self.optimizer.make_constant(box, constbox) + def make_constant_int(self, box, intconst): + return self.optimizer.make_constant_int(box, intconst) + def make_equal_to(self, box, value): return self.optimizer.make_equal_to(box, value) - + + def pure(self, opnum, args, result): + op = ResOperation(opnum, args, result) + self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op + + def nextop(self): + return self.optimizer.loop.operations[self.optimizer.i + 1] + + def skip_nextop(self): + self.optimizer.i += 1 class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=()): + def __init__(self, metainterp_sd, loop, optimizations=[]): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -753,25 +574,6 @@ self.optimize_default(op) - def propagate_bounds_backward(self, box): - # FIXME: This takes care of the instruction where box is the reuslt - # but the bounds produced by all instructions where box is - # an argument might also be tighten - v = self.getvalue(box) - b = v.intbound - if b.has_lower and b.has_upper and b.lower == b.upper: - v.make_constant(ConstInt(b.lower)) - - try: - op = self.producer[box] - except KeyError: - return - opnum = op.opnum - for value, func in propagate_bounds_ops: - if opnum == value: - func(self, op) - break - def emit_operation(self, op): self.heap_op_optimizer.emitting_operation(op) self._emit_operation(op) @@ -864,7 +666,7 @@ # otherwise, the operation remains self.emit_operation(op) - + def optimize_JUMP(self, op): orgop = self.loop.operations[-1] exitargs = [] @@ -889,7 +691,6 @@ if emit_operation: self.emit_operation(op) value.make_constant(constbox) - self.propagate_bounds_backward(op.args[0]) def optimize_GUARD_ISNULL(self, op): value = self.getvalue(op.args[0]) @@ -1224,305 +1025,7 @@ self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, op.descr)) - def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.is_null() or v2.is_null(): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - if v2.is_constant(): - val = v2.box.getint() - if val >= 0: - r.intbound.intersect(IntBound(0,val)) - elif v1.is_constant(): - val = v1.box.getint() - if val >= 0: - r.intbound.intersect(IntBound(0,val)) - - def optimize_INT_OR(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.is_null(): - self.make_equal_to(op.result, v2) - elif v2.is_null(): - self.make_equal_to(op.result, v1) - else: - self.optimize_default(op) - - def pure(self, opnum, args, result): - op = ResOperation(opnum, args, result) - self.pure_operations[self.make_args_key(op)] = op - - def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v2.is_constant() and v2.box.getint() == 0: - self.make_equal_to(op.result, v1) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) - - # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, 
op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) - - def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - # If one side of the op is 0 the result is the other side. - if v1.is_constant() and v1.box.getint() == 0: - self.make_equal_to(op.result, v2) - elif v2.is_constant() and v2.box.getint() == 0: - self.make_equal_to(op.result, v1) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) - - # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) - - def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - # If one side of the op is 0 the result is the other side. - if v1.is_constant() and v1.box.getint() == 1: - self.make_equal_to(op.result, v2) - elif v2.is_constant() and v2.box.getint() == 1: - self.make_equal_to(op.result, v1) - elif (v1.is_constant() and v1.box.getint() == 0) or \ - (v2.is_constant() and v2.box.getint() == 0): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) - - def optimize_INT_ADD_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - resbound = v1.intbound.add_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.loop.operations[self.i+1].opnum == rop.GUARD_NO_OVERFLOW: - # Transform into INT_ADD and remove guard - op.opnum = rop.INT_ADD - self.i += 1 - self.optimize_INT_ADD(op) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - - def optimize_INT_SUB_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - resbound = v1.intbound.sub_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.loop.operations[self.i+1].opnum == rop.GUARD_NO_OVERFLOW: - # Transform into INT_SUB and remove guard - op.opnum = rop.INT_SUB - self.i += 1 - self.optimize_INT_SUB(op) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - - def optimize_INT_MUL_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - resbound = v1.intbound.mul_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.loop.operations[self.i+1].opnum == rop.GUARD_NO_OVERFLOW: - # Transform into INT_MUL and remove guard - op.opnum = rop.INT_MUL - self.i += 1 - self.optimize_INT_MUL(op) - else: - self.optimize_default(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - - def optimize_INT_LT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_lt(v2.intbound): - self.make_constant_int(op.result, 1) - elif v1.intbound.known_ge(v2.intbound): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - - def optimize_INT_GT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_gt(v2.intbound): - self.make_constant_int(op.result, 1) - elif v1.intbound.known_le(v2.intbound): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - - def optimize_INT_LE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_le(v2.intbound): - self.make_constant_int(op.result, 
1) - elif v1.intbound.known_gt(v2.intbound): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - - def optimize_INT_GE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_ge(v2.intbound): - self.make_constant_int(op.result, 1) - elif v1.intbound.known_lt(v2.intbound): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - - def optimize_INT_EQ(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_gt(v2.intbound): - self.make_constant_int(op.result, 0) - elif v1.intbound.known_lt(v2.intbound): - self.make_constant_int(op.result, 0) - else: - self.optimize_default(op) - - def optimize_INT_NE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.known_gt(v2.intbound): - self.make_constant_int(op.result, 1) - elif v1.intbound.known_lt(v2.intbound): - self.make_constant_int(op.result, 1) - else: - self.optimize_default(op) - - def make_int_lt(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) - if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) - if v2.intbound.make_gt(v1.intbound): - self.propagate_bounds_backward(args[1]) - - - def make_int_le(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) - if v1.intbound.make_le(v2.intbound): - self.propagate_bounds_backward(args[0]) - if v2.intbound.make_ge(v1.intbound): - self.propagate_bounds_backward(args[1]) - - def make_int_gt(self, args): - self.make_int_lt([args[1], args[0]]) - - def make_int_ge(self, args): - self.make_int_le([args[1], args[0]]) - - def propagate_bounds_INT_LT(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_1): - self.make_int_lt(op.args) - else: - self.make_int_ge(op.args) - - def propagate_bounds_INT_GT(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_1): - self.make_int_gt(op.args) - else: - self.make_int_le(op.args) - - def propagate_bounds_INT_LE(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_1): - self.make_int_le(op.args) - else: - self.make_int_gt(op.args) - - def propagate_bounds_INT_GE(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_1): - self.make_int_ge(op.args) - else: - self.make_int_lt(op.args) - - def propagate_bounds_INT_EQ(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_1): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) - if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) - - def propagate_bounds_INT_NE(self, op): - r = self.getvalue(op.result) - if r.is_constant(): - if r.box.same_constant(CONST_0): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) - if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) - - def propagate_bounds_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - r = self.getvalue(op.result) - b = r.intbound.sub_bound(v2.intbound) - if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) - b = r.intbound.sub_bound(v1.intbound) - if v2.intbound.intersect(b): - 
self.propagate_bounds_backward(op.args[1]) - - def propagate_bounds_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - r = self.getvalue(op.result) - b = r.intbound.add_bound(v2.intbound) - if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) - b = r.intbound.sub_bound(v1.intbound).mul(-1) - if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) - - def propagate_bounds_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) - r = self.getvalue(op.result) - b = r.intbound.div_bound(v2.intbound) - if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) - b = r.intbound.div_bound(v1.intbound) - if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) - - propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD - propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB - propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL - optimize_ops = _findall(Optimizer, 'optimize_') -propagate_bounds_ops = _findall(Optimizer, 'propagate_bounds_') class CachedArrayItems(object): @@ -1761,8 +1264,3 @@ write=True) -def min4(t): - return min(min(t[0], t[1]), min(t[2], t[3])) - -def max4(t): - return max(max(t[0], t[1]), max(t[2], t[3])) Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 2 19:53:23 2010 @@ -1,8 +1,10 @@ from optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt +from pypy.jit.metainterp.optimizeutil import _findall +from pypy.jit.metainterp.resoperation import rop, ResOperation -class Rewrite(Optimization): +class OptRewrite(Optimization): """Rewrite operations into equvivialent, already executed operations or constants. 
""" @@ -11,7 +13,14 @@ args = self.optimizer.make_args_key(op) if self.find_rewritable_bool(op, args): return - self.emit_operation(op) + + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.emit_operation(op) def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) @@ -38,7 +47,7 @@ pass try: - oldopnum = opboolreflex[op.opnum] + oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.descr is op.descr: @@ -57,6 +66,69 @@ return False + def optimize_INT_AND(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.is_null() or v2.is_null(): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + def optimize_INT_OR(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v1.is_null(): + self.make_equal_to(op.result, v2) + elif v2.is_null(): + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_SUB(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + # Synthesize the reverse ops for optimize_default to reuse + self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0]) + self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) + + def optimize_INT_ADD(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + + # If one side of the op is 0 the result is the other side. + if v1.is_constant() and v1.box.getint() == 0: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + # Synthesize the reverse op for optimize_default to reuse + self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) + self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) + + def optimize_INT_MUL(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + + # If one side of the op is 1 the result is the other side. 
+ if v1.is_constant() and v1.box.getint() == 1: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 1: + self.make_equal_to(op.result, v1) + elif (v1.is_constant() and v1.box.getint() == 0) or \ + (v2.is_constant() and v2.box.getint() == 0): + self.make_constant_int(op.result, 0) + else: + self.emit_operation(op) + + +optimize_ops = _findall(OptRewrite, 'optimize_') Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py Thu Sep 2 19:53:23 2010 @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.optimizeopt.optimizer import IntBound, IntUpperBound, \ +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded from copy import copy From arigo at codespeak.net Thu Sep 2 19:57:14 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 2 Sep 2010 19:57:14 +0200 (CEST) Subject: [pypy-svn] r76844 - in pypy/branch/better-map-instances/pypy: config interpreter objspace/std objspace/std/test Message-ID: <20100902175714.B6BDC282BDC@codespeak.net> Author: arigo Date: Thu Sep 2 19:57:12 2010 New Revision: 76844 Modified: pypy/branch/better-map-instances/pypy/config/pypyoption.py pypy/branch/better-map-instances/pypy/interpreter/pycode.py pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: (cfbolz, arigo) Add a cache for fast instance attribute lookups with mapdicts. This is only the non-jitted path. In the fast case, there is no dict lookup at all (neither on the instance nor on the type). There is one "XXX fix me" left. 
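In outline the cache works as follows; this is a simplified sketch, where gettype() and slow_path_and_refill() are invented helper names, while _get_mapdict_map() and _get_mapdict_storage() are the real accessors used in the mapdict.py diff below:

    class CacheEntrySketch(object):
        # one entry per constant name used by LOAD_ATTR in a code object
        def __init__(self, map, version_tag, index):
            self.map = map                  # attribute map of the instance
            self.version_tag = version_tag  # type version when entry was filled
            self.index = index              # position in the flat storage list

    def load_attr(cache, w_obj, nameindex):
        entry = cache[nameindex]
        map = w_obj._get_mapdict_map()
        if (map is entry.map and
                w_obj.gettype().version_tag() is entry.version_tag):
            # instance layout and type are both unchanged since the entry was
            # filled: read the attribute with no dict lookup at all
            return w_obj._get_mapdict_storage()[entry.index]
        # otherwise do the full lookup and refill the entry
        return slow_path_and_refill(cache, w_obj, nameindex)

The real fast path additionally has to reach the type through the map's terminator (the "XXX fix me, too slow" note in LOAD_ATTR_caching) and it refuses to cache data descriptors other than slots.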
Modified: pypy/branch/better-map-instances/pypy/config/pypyoption.py ============================================================================== --- pypy/branch/better-map-instances/pypy/config/pypyoption.py (original) +++ pypy/branch/better-map-instances/pypy/config/pypyoption.py Thu Sep 2 19:57:12 2010 @@ -244,6 +244,8 @@ requires=[("objspace.std.withshadowtracking", False), ("objspace.std.withinlineddict", False), ("objspace.std.withsharingdict", False), + ("objspace.std.getattributeshortcut", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", Modified: pypy/branch/better-map-instances/pypy/interpreter/pycode.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/pycode.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/pycode.py Thu Sep 2 19:57:12 2010 @@ -117,6 +117,10 @@ self._compute_flatcall() + if self.space.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import init_mapdict_cache + init_mapdict_cache(self) + def _freeze_(self): if self.magic == cpython_magic: raise Exception("CPython host codes should not be rendered") Modified: pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py Thu Sep 2 19:57:12 2010 @@ -710,9 +710,14 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" - w_attributename = self.getname_w(nameindex) w_obj = self.popvalue() - w_value = self.space.getattr(w_obj, w_attributename) + if (self.space.config.objspace.std.withmapdict + and not jit.we_are_jitted()): + from pypy.objspace.std.mapdict import LOAD_ATTR_caching + w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) + else: + w_attributename = self.getname_w(nameindex) + w_value = self.space.getattr(w_obj, w_attributename) self.pushvalue(w_value) LOAD_ATTR._always_inline_ = True Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 2 19:57:12 2010 @@ -23,6 +23,9 @@ def delete(self, obj, selector): return None + def index(self, selector): + return -1 + def copy(self, obj): raise NotImplementedError("abstract base class") @@ -194,7 +197,7 @@ return self.back.write(obj, selector, w_value) def delete(self, obj, selector): - if self.selector == selector: + if selector == self.selector: # ok, attribute is deleted return self.back.copy(obj) new_obj = self.back.delete(obj, selector) @@ -202,6 +205,11 @@ self._copy_attr(obj, new_obj) return new_obj + def index(self, selector): + if selector == self.selector: + return self.position + return self.back.index(selector) + def copy(self, obj): new_obj = self.back.copy(obj) self._copy_attr(obj, new_obj) @@ -248,7 +256,8 @@ DICT = 0 SPECIAL = 1 -SLOTS_STARTING_FROM = 2 +INVALID = 2 +SLOTS_STARTING_FROM = 3 class Object(W_Root): # slightly evil to make it inherit from W_Root @@ -441,3 +450,77 @@ w_attr = self.space.wrap(attr) return w_attr, self.w_obj.getdictvalue(self.space, attr) return None, None + +# ____________________________________________________________ +# Magic caching + +# XXX we also would like 
getdictvalue_attr_is_in_class() above + +class CacheEntry(object): + map = None + version_tag = None + index = 0 + success_counter = 0 + failure_counter = 0 + +INVALID_CACHE_ENTRY = CacheEntry() +INVALID_CACHE_ENTRY.map = AbstractAttribute() # different from any real map + +def init_mapdict_cache(pycode): + num_entries = len(pycode.co_names_w) + pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries + +def LOAD_ATTR_caching(pycode, w_obj, nameindex): + # this whole mess is to make the interpreter quite a bit faster; it's not + # used if we_are_jitted(). + entry = pycode._mapdict_caches[nameindex] + map = w_obj._get_mapdict_map() + if map is entry.map: + w_type = map.get_terminator().w_cls # XXX fix me, too slow + version_tag = w_type.version_tag() + if version_tag is entry.version_tag: + # everything matches, it's incredibly fast + if pycode.space.config.objspace.std.withmethodcachecounter: + entry.success_counter += 1 + return w_obj._get_mapdict_storage()[entry.index] + return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) +LOAD_ATTR_caching._always_inline_ = True + +def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): + space = pycode.space + w_name = pycode.co_names_w[nameindex] + if map is not None: + w_type = map.get_terminator().w_cls + w_descr = w_type.getattribute_if_not_from_object() + if w_descr is not None: + return space._handle_getattribute(w_descr, w_obj, w_name) + + version_tag = w_type.version_tag() + if version_tag is not None: + name = space.str_w(w_name) + w_descr = w_type.lookup(name) + selector = ("", INVALID) + if w_descr is not None and space.is_data_descr(w_descr): + from pypy.interpreter.typedef import Member + descr = space.interpclass_w(w_descr) + if isinstance(descr, Member): + selector = ("slot", SLOTS_STARTING_FROM + descr.index) + else: + selector = (name, DICT) + if selector[1] != INVALID: + index = map.index(selector) + if index >= 0: + entry = pycode._mapdict_caches[nameindex] + if entry is INVALID_CACHE_ENTRY: + entry = CacheEntry() + pycode._mapdict_caches[nameindex] = entry + entry.map = map + entry.version_tag = version_tag + entry.index = index + if space.config.objspace.std.withmethodcachecounter: + entry.failure_counter += 1 + return w_obj._get_mapdict_storage()[index] + if space.config.objspace.std.withmethodcachecounter: + INVALID_CACHE_ENTRY.failure_counter += 1 + return space.getattr(w_obj, w_name) +LOAD_ATTR_slowpath._dont_inline_ = True Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Thu Sep 2 19:57:12 2010 @@ -1,4 +1,4 @@ -from pypy.conftest import gettestobjspace +from pypy.conftest import gettestobjspace, option from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject from pypy.objspace.std.mapdict import * @@ -498,3 +498,171 @@ assert a.y == 2 assert a.__dict__ is d assert isinstance(a, B) + + +class AppTestWithMapDictAndCounters(object): + def setup_class(cls): + from pypy.interpreter import gateway + cls.space = gettestobjspace( + **{"objspace.std.withmapdict": True, + "objspace.std.withmethodcachecounter": True}) + # + def check(space, w_func, name): + w_code = space.getattr(w_func, space.wrap('func_code')) + nameindex = map(space.str_w, w_code.co_names_w).index(name) + entry = w_code._mapdict_caches[nameindex] + 
entry.failure_counter = 0 + entry.success_counter = 0 + INVALID_CACHE_ENTRY.failure_counter = 0 + # + w_res = space.call_function(w_func) + assert space.eq_w(w_res, space.wrap(42)) + # + entry = w_code._mapdict_caches[nameindex] + if entry is INVALID_CACHE_ENTRY: + failures = successes = 0 + else: + failures = entry.failure_counter + successes = entry.success_counter + globalfailures = INVALID_CACHE_ENTRY.failure_counter + return space.wrap((failures, successes, globalfailures)) + check.unwrap_spec = [gateway.ObjSpace, gateway.W_Root, str] + cls.w_check = cls.space.wrap(gateway.interp2app(check)) + + def test_simple(self): + class A(object): + pass + a = A() + a.x = 42 + def f(): + return a.x + # + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + # + A.y = 5 # unrelated, but changes the version_tag + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + # + A.x = 8 # but shadowed by 'a.x' + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + + def test_property(self): + class A(object): + x = property(lambda self: 42) + a = A() + def f(): + return a.x + # + res = self.check(f, 'x') + assert res == (0, 0, 1) + res = self.check(f, 'x') + assert res == (0, 0, 1) + res = self.check(f, 'x') + assert res == (0, 0, 1) + + def test_slots(self): + class A(object): + __slots__ = ['x'] + a = A() + a.x = 42 + def f(): + return a.x + # + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + + def test_two_attributes(self): + class A(object): + pass + a = A() + a.x = 40 + a.y = -2 + def f(): + return a.x - a.y + # + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + # + res = self.check(f, 'y') + assert res == (0, 1, 0) + res = self.check(f, 'y') + assert res == (0, 1, 0) + res = self.check(f, 'y') + assert res == (0, 1, 0) + + def test_two_maps(self): + class A(object): + pass + a = A() + a.x = 42 + def f(): + return a.x + # + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + # + a.y = "foo" # changes the map + res = self.check(f, 'x') + assert res == (1, 0, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + # + a.y = "bar" # does not change the map any more + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + res = self.check(f, 'x') + assert res == (0, 1, 0) + + def test_custom_metaclass(self): + class A(object): + class __metaclass__(type): + pass + a = A() + a.x = 42 + def f(): + return a.x + # + res = self.check(f, 'x') + assert res == (0, 0, 1) + res = self.check(f, 'x') + assert res == (0, 0, 1) + res = self.check(f, 'x') + assert res == (0, 0, 1) From arigo at codespeak.net Thu Sep 2 20:07:19 2010 From: arigo at codespeak.net (arigo at 
codespeak.net) Date: Thu, 2 Sep 2010 20:07:19 +0200 (CEST) Subject: [pypy-svn] r76845 - in pypy/branch/better-map-instances/pypy/objspace/std: . test Message-ID: <20100902180719.86C79282BDC@codespeak.net> Author: arigo Date: Thu Sep 2 20:07:15 2010 New Revision: 76845 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py Log: (cfbolz, arigo) Speed up reading the class of an instance as a mapdict, by duplicating this info on all PlainAttributes. Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 2 20:07:15 2010 @@ -1,4 +1,4 @@ -from pypy.rlib import jit +from pypy.rlib import jit, objectmodel from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import W_DictMultiObject @@ -14,6 +14,10 @@ cache_attrs = None _size_estimate = 0 + def __init__(self, space, w_cls): + self.space = space + self.w_cls = w_cls + def read(self, obj, selector): raise NotImplementedError("abstract base class") @@ -83,9 +87,6 @@ class Terminator(AbstractAttribute): _immutable_fields_ = ['w_cls'] - def __init__(self, w_cls, space): - self.w_cls = w_cls - self.space = space def read(self, obj, selector): return None @@ -117,9 +118,9 @@ class DictTerminator(Terminator): _immutable_fields_ = ['devolved_dict_terminator'] - def __init__(self, w_cls, space): - Terminator.__init__(self, w_cls, space) - self.devolved_dict_terminator = DevolvedDictTerminator(w_cls, space) + def __init__(self, space, w_cls): + Terminator.__init__(self, space, w_cls) + self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls) def materialize_r_dict(self, space, obj, w_d): result = Object() @@ -176,6 +177,7 @@ class PlainAttribute(AbstractAttribute): _immutable_fields_ = ['selector', 'position', 'back'] def __init__(self, selector, back): + AbstractAttribute.__init__(self, back.space, back.w_cls) self.selector = selector self.position = back.length() self.back = back @@ -315,7 +317,7 @@ assert flag def getclass(self, space): - return self._get_mapdict_map().get_terminator().w_cls + return self._get_mapdict_map().w_cls def setclass(self, space, w_cls): new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) @@ -464,7 +466,8 @@ failure_counter = 0 INVALID_CACHE_ENTRY = CacheEntry() -INVALID_CACHE_ENTRY.map = AbstractAttribute() # different from any real map +INVALID_CACHE_ENTRY.map = objectmodel.instantiate(AbstractAttribute) + # different from any real map ^^^ def init_mapdict_cache(pycode): num_entries = len(pycode.co_names_w) @@ -476,8 +479,7 @@ entry = pycode._mapdict_caches[nameindex] map = w_obj._get_mapdict_map() if map is entry.map: - w_type = map.get_terminator().w_cls # XXX fix me, too slow - version_tag = w_type.version_tag() + version_tag = map.w_cls.version_tag() if version_tag is entry.version_tag: # everything matches, it's incredibly fast if pycode.space.config.objspace.std.withmethodcachecounter: @@ -490,7 +492,7 @@ space = pycode.space w_name = pycode.co_names_w[nameindex] if map is not None: - w_type = map.get_terminator().w_cls + w_type = map.w_cls w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: return space._handle_getattribute(w_descr, w_obj, 
w_name) Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Thu Sep 2 20:07:15 2010 @@ -8,9 +8,9 @@ def __init__(self, hasdict=True): self.hasdict = True if hasdict: - self.terminator = DictTerminator(self, space) + self.terminator = DictTerminator(space, self) else: - self.terminator = NoDictTerminator(self, space) + self.terminator = NoDictTerminator(space, self) def instantiate(self, sp=None): if sp is None: @@ -24,7 +24,14 @@ hasdict = False def test_plain_attribute(): - aa = PlainAttribute(("b", DICT), PlainAttribute(("a", DICT), Terminator(None, None))) + space = " " + w_cls = "class" + aa = PlainAttribute(("b", DICT), + PlainAttribute(("a", DICT), + Terminator(space, w_cls))) + assert aa.space is space + assert aa.w_cls is w_cls + obj = Object() obj.map, obj.storage = aa, [10, 20] assert obj.getdictvalue(space, "a") == 10 Modified: pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py Thu Sep 2 20:07:15 2010 @@ -121,9 +121,9 @@ if space.config.objspace.std.withmapdict: from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator if w_self.hasdict: - w_self.terminator = DictTerminator(w_self, space) + w_self.terminator = DictTerminator(space, w_self) else: - w_self.terminator = NoDictTerminator(w_self, space) + w_self.terminator = NoDictTerminator(space, w_self) def mutated(w_self): space = w_self.space From arigo at codespeak.net Fri Sep 3 10:16:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 10:16:04 +0200 (CEST) Subject: [pypy-svn] r76846 - pypy/branch/no-_immutable_/pypy/rpython/memory/test Message-ID: <20100903081604.4C6FB282BEB@codespeak.net> Author: arigo Date: Fri Sep 3 10:16:01 2010 New Revision: 76846 Modified: pypy/branch/no-_immutable_/pypy/rpython/memory/test/test_gctypelayout.py Log: Fix test. Modified: pypy/branch/no-_immutable_/pypy/rpython/memory/test/test_gctypelayout.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/rpython/memory/test/test_gctypelayout.py (original) +++ pypy/branch/no-_immutable_/pypy/rpython/memory/test/test_gctypelayout.py Fri Sep 3 10:16:01 2010 @@ -101,7 +101,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) From arigo at codespeak.net Fri Sep 3 10:16:51 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 10:16:51 +0200 (CEST) Subject: [pypy-svn] r76847 - pypy/branch/no-_immutable_/pypy/translator/backendopt/test Message-ID: <20100903081651.24797282BEB@codespeak.net> Author: arigo Date: Fri Sep 3 10:16:48 2010 New Revision: 76847 Modified: pypy/branch/no-_immutable_/pypy/translator/backendopt/test/test_constfold.py Log: Fix test. 
Modified: pypy/branch/no-_immutable_/pypy/translator/backendopt/test/test_constfold.py ============================================================================== --- pypy/branch/no-_immutable_/pypy/translator/backendopt/test/test_constfold.py (original) +++ pypy/branch/no-_immutable_/pypy/translator/backendopt/test/test_constfold.py Fri Sep 3 10:16:48 2010 @@ -49,7 +49,7 @@ accessor = rclass.FieldListAccessor() S2 = lltype.GcStruct('S2', ('x', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S2, ['x']) + accessor.initialize(S2, {'x': ''}) test_simple(S2) From cfbolz at codespeak.net Fri Sep 3 11:26:42 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 3 Sep 2010 11:26:42 +0200 (CEST) Subject: [pypy-svn] r76848 - pypy/branch/better-map-instances/pypy/module/cpyext Message-ID: <20100903092642.7325E282BEB@codespeak.net> Author: cfbolz Date: Fri Sep 3 11:26:39 2010 New Revision: 76848 Modified: pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py Log: adapt cpyext to new interface Modified: pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py (original) +++ pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py Fri Sep 3 11:26:39 2010 @@ -17,14 +17,18 @@ instance.""" if not PyClass_Check(space, w_class): return PyErr_BadInternalCall(space) - return W_InstanceObject(space, w_class, w_dict) + w_result = w_class.instantiate(space) + if w_dict is not None: + w_result.setdict(space, w_dict) + return w_result @cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) def _PyInstance_Lookup(space, w_instance, w_name): + name = space.str_w(w_name) assert isinstance(w_instance, W_InstanceObject) - w_result = space.finditem(w_instance.w_dict, w_name) + w_result = w_instance.getdictvalue(space, name) if w_result is not None: return w_result - return w_instance.w_class.lookup(space, w_name) + return w_instance.w_class.lookup(space, name) From cfbolz at codespeak.net Fri Sep 3 11:54:09 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 3 Sep 2010 11:54:09 +0200 (CEST) Subject: [pypy-svn] r76849 - in pypy/branch/better-map-instances/pypy: module/__builtin__ module/__builtin__/test objspace/std Message-ID: <20100903095409.D79E8282B9C@codespeak.net> Author: cfbolz Date: Fri Sep 3 11:54:07 2010 New Revision: 76849 Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: (cfbolz, arigo looking): make mapdict work with old-style classes Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Fri Sep 3 11:54:07 2010 @@ -5,6 +5,8 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import GetSetProperty, descr_get_dict +from pypy.interpreter.typedef import descr_set_dict from pypy.rlib.rarithmetic import r_uint, intmask from pypy.rlib.objectmodel import compute_identity_hash from 
pypy.rlib.debug import make_sure_not_resized @@ -402,7 +404,7 @@ if name and name[0] == "_": if name == '__dict__': # use setdict to raise the error - self.setdict(space, None) + self.setdict(space, space.w_None) return elif name == '__class__': # use set_oldstyle_class to raise the error @@ -706,6 +708,14 @@ rmeth, unwrap_spec=["self", ObjSpace, W_Root]) + +def descr_del_dict(space, w_inst): + # use setdict to raise the error + self.setdict(space, space.w_None) + +dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict) +dict_descr.name = '__dict__' + W_InstanceObject.typedef = TypeDef("instance", __new__ = interp2app(descr_instance_new), __getattribute__ = interp2app(W_InstanceObject.descr_getattribute, @@ -757,6 +767,7 @@ unwrap_spec=['self', ObjSpace]), __del__ = interp2app(W_InstanceObject.descr_del, unwrap_spec=['self', ObjSpace]), + __dict__ = dict_descr, **rawdict ) W_InstanceObject.typedef.acceptable_as_base_class = False Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Fri Sep 3 11:54:07 2010 @@ -815,3 +815,22 @@ a = 1 b = 2 assert self.is_strdict(A) + +class AppTestOldStyleMapDict(AppTestOldstyle): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withmapdict": True}) + if option.runappdirect: + py.test.skip("can only be run on py.py") + def has_mapdict(space, w_inst): + return space.wrap(w_inst._get_mapdict_map() is not None) + cls.w_has_mapdict = cls.space.wrap(gateway.interp2app(has_mapdict)) + + + def test_has_mapdict(self): + class A: + def __init__(self): + self.x = 42 + a = A() + assert a.x == 42 + assert self.has_mapdict(a) + Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Fri Sep 3 11:54:07 2010 @@ -324,8 +324,10 @@ self._become(new_obj) def user_setup(self, space, w_subtype): + from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space - assert not self.typedef.hasdict + assert (not self.typedef.hasdict or + self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) def getslotvalue(self, index): From arigo at codespeak.net Fri Sep 3 12:57:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 12:57:58 +0200 (CEST) Subject: [pypy-svn] r76850 - pypy/branch/markcompact/pypy/rpython/memory/gc Message-ID: <20100903105758.C25F0282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 12:57:55 2010 New Revision: 76850 Modified: pypy/branch/markcompact/pypy/rpython/memory/gc/markcompact.py Log: Increase the default of min_next_collect_after. Let it be configurable at run-time with the env var PYPY_MARKCOMPACTGC_MIN. Modified: pypy/branch/markcompact/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/markcompact/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/markcompact/pypy/rpython/memory/gc/markcompact.py Fri Sep 3 12:57:55 2010 @@ -75,7 +75,7 @@ # a big mmap. 
The process does not actually consume that space until # needed, of course. TRANSLATION_PARAMS = {'space_size': int((1 + 15.0/16)*1024*1024*1024), - 'min_next_collect_after': 4*1024*1024} # 4MB + 'min_next_collect_after': 16*1024*1024} # 16MB malloc_zero_filled = False inline_simple_malloc = True @@ -111,9 +111,12 @@ return next def setup(self): - envsize = max_size_from_env() + envsize = read_from_env('PYPY_MARKCOMPACTGC_MAX') if envsize >= 4096: self.space_size = envsize & ~4095 + mincollect = read_from_env('PYPY_MARKCOMPACTGC_MIN') + if mincollect >= 4096: + self.min_next_collect_after = mincollect #self.program_start_time = time.time() self.space = llarena.arena_malloc(self.space_size, False) @@ -675,6 +678,3 @@ class CannotAllocateGCArena(Exception): pass - -def max_size_from_env(): - return read_from_env('PYPY_MARKCOMPACTGC_MAX') From cfbolz at codespeak.net Fri Sep 3 12:59:45 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 3 Sep 2010 12:59:45 +0200 (CEST) Subject: [pypy-svn] r76851 - in pypy/branch/better-map-instances/pypy/module/__builtin__: . test Message-ID: <20100903105945.40A08282B9C@codespeak.net> Author: cfbolz Date: Fri Sep 3 12:59:42 2010 New Revision: 76851 Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Log: that which is not tested is broken Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Fri Sep 3 12:59:42 2010 @@ -711,7 +711,7 @@ def descr_del_dict(space, w_inst): # use setdict to raise the error - self.setdict(space, space.w_None) + w_inst.setdict(space, space.w_None) dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict) dict_descr.name = '__dict__' Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Fri Sep 3 12:59:42 2010 @@ -778,6 +778,21 @@ else: assert 0, "should have raised" + def test_dict_descriptor(self): + import sys + if not hasattr(sys, 'pypy_objspaceclass'): + skip("on CPython old-style instances don't have a __dict__ descriptor") + class A: + pass + a = A() + a.x = 1 + descr = type(a).__dict__['__dict__'] + assert descr.__get__(a) == {'x': 1} + descr.__set__(a, {'x': 2}) + assert a.x == 2 + raises(TypeError, descr.__delete__, a) + + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withsharingdict": True}) From arigo at codespeak.net Fri Sep 3 13:39:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 13:39:15 +0200 (CEST) Subject: [pypy-svn] r76852 - in pypy/trunk/pypy: rpython/lltypesystem rpython/lltypesystem/test rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/test translator translator/c/test Message-ID: <20100903113915.4C864282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 13:39:11 2010 New Revision: 76852 Modified: pypy/trunk/pypy/rpython/lltypesystem/llgroup.py 
pypy/trunk/pypy/rpython/lltypesystem/llmemory.py pypy/trunk/pypy/rpython/lltypesystem/opimpl.py pypy/trunk/pypy/rpython/lltypesystem/rffi.py pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/generation.py pypy/trunk/pypy/rpython/memory/gc/markcompact.py pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py pypy/trunk/pypy/rpython/memory/gctypelayout.py pypy/trunk/pypy/rpython/memory/gcwrapper.py pypy/trunk/pypy/rpython/memory/test/test_gc.py pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py pypy/trunk/pypy/translator/c/test/test_newgc.py pypy/trunk/pypy/translator/exceptiontransform.py Log: Merge branch/markcompact: re-enable and improve the mark&compact GC. Its usefulness is still a bit unclear. Modified: pypy/trunk/pypy/rpython/lltypesystem/llgroup.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llgroup.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llgroup.py Fri Sep 3 13:39:11 2010 @@ -136,6 +136,10 @@ assert (other & CombinedSymbolic.MASK) == 0 return CombinedSymbolic(self.lowpart, self.rest - other) + def __rshift__(self, other): + assert other >= HALFSHIFT + return self.rest >> other + def __eq__(self, other): if (isinstance(other, CombinedSymbolic) and self.lowpart is other.lowpart): Modified: pypy/trunk/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llmemory.py Fri Sep 3 13:39:11 2010 @@ -361,19 +361,27 @@ # ____________________________________________________________ +def _sizeof_none(TYPE): + assert not TYPE._is_varsize() + return ItemOffset(TYPE) +_sizeof_none._annspecialcase_ = 'specialize:memo' + +def _sizeof_int(TYPE, n): + "NOT_RPYTHON" + if isinstance(TYPE, lltype.Struct): + return FieldOffset(TYPE, TYPE._arrayfld) + \ + itemoffsetof(TYPE._flds[TYPE._arrayfld], n) + else: + raise Exception("don't know how to take the size of a %r"%TYPE) + def sizeof(TYPE, n=None): if n is None: - assert not TYPE._is_varsize() - return ItemOffset(TYPE) + return _sizeof_none(TYPE) + elif isinstance(TYPE, lltype.Array): + return itemoffsetof(TYPE) + _sizeof_none(TYPE.OF) * n else: - if isinstance(TYPE, lltype.Array): - return itemoffsetof(TYPE, n) - elif isinstance(TYPE, lltype.Struct): - return FieldOffset(TYPE, TYPE._arrayfld) + \ - itemoffsetof(TYPE._flds[TYPE._arrayfld], n) - else: - raise Exception("don't know how to take the size of a %r"%TYPE) -sizeof._annspecialcase_ = 'specialize:memo' # only for n == None + return _sizeof_int(TYPE, n) +sizeof._annspecialcase_ = 'specialize:arg(0)' def offsetof(TYPE, fldname): assert fldname in TYPE._flds Modified: pypy/trunk/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/opimpl.py Fri Sep 3 13:39:11 2010 @@ -197,6 +197,18 @@ assert isinstance(y, int) return intmask(x - y) +def op_int_ge(x, y): + # special case for 'AddressOffset >= 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x >= y + +def op_int_lt(x, y): + # special case for 'AddressOffset < 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x < y + def op_int_between(a, b, c): assert 
lltype.typeOf(a) is lltype.Signed assert lltype.typeOf(b) is lltype.Signed @@ -222,6 +234,13 @@ assert isinstance(y, (int, llmemory.AddressOffset)) return intmask(x * y) +def op_int_rshift(x, y): + if not isinstance(x, int): + from pypy.rpython.lltypesystem import llgroup + assert isinstance(x, llgroup.CombinedSymbolic) + assert isinstance(y, int) + return x >> y + def op_int_floordiv(x, y): assert isinstance(x, (int, llmemory.AddressOffset)) assert isinstance(y, (int, llmemory.AddressOffset)) Modified: pypy/trunk/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/rffi.py Fri Sep 3 13:39:11 2010 @@ -593,9 +593,12 @@ """ str -> char* """ array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw') - for i in range(len(s)): + i = len(s) + array[i] = lastchar + i -= 1 + while i >= 0: array[i] = s[i] - array[len(s)] = lastchar + i -= 1 return array str2charp._annenforceargs_ = [strtype] Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py Fri Sep 3 13:39:11 2010 @@ -105,6 +105,8 @@ assert p == test.p1b assert cslist[0] & ~MASK == 0x45 << HALFSHIFT assert cslist[1] & ~MASK == 0x41 << HALFSHIFT + assert cslist[0] >> HALFSHIFT == 0x45 + assert cslist[1] >> (HALFSHIFT+1) == 0x41 >> 1 # return 42 return f Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Fri Sep 3 13:39:11 2010 @@ -86,8 +86,7 @@ addr -= self.gcheaderbuilder.size_gc_header return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - def get_size(self, obj): - typeid = self.get_type_id(obj) + def _get_size_for_typeid(self, obj, typeid): size = self.fixed_size(typeid) if self.is_varsize(typeid): lenaddr = obj + self.varsize_offset_to_length(typeid) @@ -99,6 +98,9 @@ # gctypelayout.encode_type_shape() return size + def get_size(self, obj): + return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. 
@@ -218,7 +220,6 @@ pending = self._debug_pending while pending.non_empty(): obj = pending.pop() - self.debug_check_object(obj) self.trace(obj, self._debug_callback2, None) self._debug_seen.delete() self._debug_pending.delete() @@ -227,6 +228,7 @@ seen = self._debug_seen if not seen.contains(obj): seen.add(obj) + self.debug_check_object(obj) self._debug_pending.append(obj) def _debug_callback(self, root): obj = root.address[0] @@ -348,3 +350,23 @@ globals(), locals(), [classname]) GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS + +def read_from_env(varname): + import os + value = os.environ.get(varname) + if value: + realvalue = value[:-1] + if value[-1] in 'kK': + factor = 1024 + elif value[-1] in 'mM': + factor = 1024*1024 + elif value[-1] in 'gG': + factor = 1024*1024*1024 + else: + factor = 1 + realvalue = value + try: + return int(float(realvalue) * factor) + except ValueError: + pass + return -1 Modified: pypy/trunk/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/generation.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/generation.py Fri Sep 3 13:39:11 2010 @@ -2,6 +2,7 @@ from pypy.rpython.memory.gc.semispace import SemiSpaceGC from pypy.rpython.memory.gc.semispace import GCFLAG_EXTERNAL, GCFLAG_FORWARDED from pypy.rpython.memory.gc.semispace import GC_HASH_TAKEN_ADDR +from pypy.rpython.memory.gc.base import read_from_env from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage from pypy.rpython.lltypesystem import lltype, llmemory, llarena from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -625,18 +626,7 @@ import os def nursery_size_from_env(): - value = os.environ.get('PYPY_GENERATIONGC_NURSERY') - if value: - if value[-1] in 'kK': - factor = 1024 - value = value[:-1] - else: - factor = 1 - try: - return int(value) * factor - except ValueError: - pass - return -1 + return read_from_env('PYPY_GENERATIONGC_NURSERY') def best_nursery_size_for_L2cache(L2cache): # Heuristically, the best nursery size to choose is about half Modified: pypy/trunk/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/markcompact.py Fri Sep 3 13:39:11 2010 @@ -1,27 +1,17 @@ - -import time - from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup -from pypy.rpython.memory.gc.base import MovingGCBase +from pypy.rpython.memory.gc.base import MovingGCBase, read_from_env from pypy.rlib.debug import ll_assert, have_debug_prints from pypy.rlib.debug import debug_print, debug_start, debug_stop from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage -from pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, running_on_llinterp from pypy.rpython.lltypesystem import rffi from pypy.rpython.memory.gcheader import GCHeaderBuilder -first_gcflag = 1 << 16 -GCFLAG_MARKBIT = first_gcflag << 0 -GCFLAG_HASHTAKEN = first_gcflag << 1 # someone already asked for the hash 
-GCFLAG_HASHFIELD = first_gcflag << 2 # we have an extra hash field - -memoryError = MemoryError() - # Mark'n'compact garbage collector # # main point of this GC is to save as much memory as possible @@ -34,41 +24,44 @@ # this gc works more or less like semispace, but has some essential # differencies. The main difference is that we have separate phases of # marking and assigning pointers, hence order of objects is preserved. -# This means we can reuse the same space if it did not grow enough. -# More importantly, in case we need to resize space we can copy it bit by -# bit, hence avoiding double memory consumption at peak times +# This means we can reuse the same space, overwriting it as we collect. -# so the algorithm itself is performed in 3 stages (module weakrefs and -# finalizers) +# so the algorithm itself is performed in 3 stages (modulo weakrefs and +# finalizers): # 1. We mark alive objects # 2. We walk all objects and assign forward pointers in the same order, # also updating all references -# 3. We compact the space by moving. In case we move to the same space, -# we use arena_new_view trick, which looks like new space to tests, -# but compiles to the same pointer. Also we use raw_memmove in case -# objects overlap. - -# Exact algorithm for space resizing: we keep allocated more space than needed -# (2x, can be even more), but it's full of zeroes. After each collection, -# we bump next_collect_after which is a marker where to start each collection. -# It should be exponential (but less than 2) from the size occupied by objects +# 3. We compact the space by moving. We use 'arena_new_view' trick, which +# looks like new space to tests, but compiles to the same pointer. +# Also we use raw_memmove in case the object overlaps with its destination. + +# After each collection, we bump 'next_collect_after' which is a marker +# where to start each collection. It should be exponential (but less +# than 2) from the size occupied by objects so far. # field optimization - we don't need forward pointer and flags at the same -# time. Instead we copy list of tids when we know how many objects are alive -# and store forward pointer there. +# time. Instead we copy the TIDs in a list when we know how many objects are +# alive, and store the forward pointer in the old object header. +first_gcflag_bit = LONG_BIT//2 +first_gcflag = 1 << first_gcflag_bit +GCFLAG_HASHTAKEN = first_gcflag << 0 # someone already asked for the hash +GCFLAG_HASHFIELD = first_gcflag << 1 # we have an extra hash field +# note that only the first 2 bits are preserved during a collection! +GCFLAG_MARKBIT = intmask(first_gcflag << (LONG_BIT//2-1)) +assert GCFLAG_MARKBIT < 0 # should be 0x80000000 + +GCFLAG_SAVED_HASHTAKEN = GCFLAG_HASHTAKEN >> first_gcflag_bit +GCFLAG_SAVED_HASHFIELD = GCFLAG_HASHFIELD >> first_gcflag_bit -# in case we need to grow space, we use -# current_space_size * FREE_SPACE_MULTIPLIER / FREE_SPACE_DIVIDER + needed -FREE_SPACE_MULTIPLIER = 3 -FREE_SPACE_DIVIDER = 2 -FREE_SPACE_ADD = 256 -# XXX adjust -GC_CLEARANCE = 32*1024 TID_TYPE = llgroup.HALFWORD BYTES_PER_TID = rffi.sizeof(TID_TYPE) +TID_BACKUP = rffi.CArray(TID_TYPE) + +def translated_to_c(): + return we_are_translated() and not running_on_llinterp class MarkCompactGC(MovingGCBase): @@ -77,37 +70,63 @@ withhash_flag_is_in_field = 'tid', GCFLAG_HASHFIELD # ^^^ all prebuilt objects have GCFLAG_HASHTAKEN, but only some have # GCFLAG_HASHFIELD (and then they are one word longer). 
- TID_BACKUP = lltype.Array(TID_TYPE, hints={'nolength':True}) - WEAKREF_OFFSETS = lltype.Array(lltype.Signed) + # The default space size is 1.9375 GB, i.e. almost 2 GB, allocated as + # a big mmap. The process does not actually consume that space until + # needed, of course. + TRANSLATION_PARAMS = {'space_size': int((1 + 15.0/16)*1024*1024*1024), + 'min_next_collect_after': 16*1024*1024} # 16MB - TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust - - malloc_zero_filled = True + malloc_zero_filled = False inline_simple_malloc = True inline_simple_malloc_varsize = True - first_unused_gcflag = first_gcflag << 3 - total_collection_time = 0.0 - total_collection_count = 0 - - def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096): - import py; py.test.skip("Disabled for now, sorry") - self.param_space_size = space_size + #total_collection_time = 0.0 + #total_collection_count = 0 + + free = NULL + next_collect_after = -1 + + def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096, + min_next_collect_after=128): MovingGCBase.__init__(self, config, chunk_size) + self.space_size = space_size + self.min_next_collect_after = min_next_collect_after - def setup(self): - self.space_size = self.param_space_size - self.next_collect_after = self.param_space_size/2 # whatever... + def next_collection(self, used_space, num_objects_so_far, requested_size): + used_space += BYTES_PER_TID * num_objects_so_far + ll_assert(used_space <= self.space_size, + "used_space + num_objects_so_far overflow") + try: + next = (used_space // 3) * 2 + requested_size + except OverflowError: + next = self.space_size + if next < self.min_next_collect_after: + next = self.min_next_collect_after + if next > self.space_size - used_space: + next = self.space_size - used_space + # The value we return guarantees that used_space + next <= space_size, + # with 'BYTES_PER_TID*num_objects_so_far' included in used_space. + # Normally, the value we return should also be at least requested_size + # unless we are out of memory. + return next - self.program_start_time = time.time() - self.space = llarena.arena_malloc(self.space_size, True) - ll_assert(bool(self.space), "couldn't allocate arena") + def setup(self): + envsize = read_from_env('PYPY_MARKCOMPACTGC_MAX') + if envsize >= 4096: + self.space_size = envsize & ~4095 + mincollect = read_from_env('PYPY_MARKCOMPACTGC_MIN') + if mincollect >= 4096: + self.min_next_collect_after = mincollect + + #self.program_start_time = time.time() + self.space = llarena.arena_malloc(self.space_size, False) + if not self.space: + raise CannotAllocateGCArena self.free = self.space - self.top_of_space = self.space + self.next_collect_after MovingGCBase.setup(self) self.objects_with_finalizers = self.AddressDeque() - self.objects_with_weakrefs = self.AddressStack() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.next_collect_after = self.next_collection(0, 0, 0) def init_gc_object(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) @@ -115,216 +134,204 @@ def init_gc_object_immortal(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - flags |= GCFLAG_HASHTAKEN + flags |= GCFLAG_HASHTAKEN | GCFLAG_MARKBIT + # All prebuilt GC objects have the GCFLAG_MARKBIT always set. + # That's convenient to make the GC always think that they + # survive the current collection. 
hdr.tid = self.combine(typeid16, flags) - # XXX we can store forward_ptr to itself, if we fix C backend - # so that get_forwarding_address(obj) returns - # obj itself if obj is a prebuilt object - def malloc_fixedsize_clear(self, typeid16, size, can_collect, - has_finalizer=False, contains_weakptr=False): - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + size - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) + def _get_memory(self, totalsize): + # also counts the space that will be needed during the following + # collection to store the TID + requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID + self.next_collect_after -= requested_size + if self.next_collect_after < 0: + result = self.obtain_free_space(requested_size) + else: + result = self.free + self.free += totalsize llarena.arena_reserve(result, totalsize) + return result + _get_memory._always_inline_ = True + + def _get_totalsize_var(self, nonvarsize, itemsize, length): + try: + varsize = ovfcheck(itemsize * length) + except OverflowError: + raise MemoryError + # Careful to detect overflows. The following works even if varsize + # is almost equal to sys.maxint; morever, self.space_size is known + # to be at least 4095 bytes smaller than sys.maxint, so this function + # always raises instead of returning an integer >= sys.maxint-4095. + if (raw_malloc_usage(varsize) > self.space_size - + raw_malloc_usage(nonvarsize)): + raise MemoryError + return llarena.round_up_for_allocation(nonvarsize + varsize) + _get_totalsize_var._always_inline_ = True + + def _setup_object(self, result, typeid16, has_finalizer): + size_gc_header = self.gcheaderbuilder.size_gc_header self.init_gc_object(result, typeid16) - self.free += totalsize if has_finalizer: self.objects_with_finalizers.append(result + size_gc_header) - if contains_weakptr: - self.objects_with_weakrefs.append(result + size_gc_header) return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) - + _setup_object._always_inline_ = True + + def malloc_fixedsize(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + return self._setup_object(result, typeid16, has_finalizer) + + def malloc_fixedsize_clear(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) + return self._setup_object(result, typeid16, has_finalizer) + def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length, can_collect): size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise memoryError - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid16) + totalsize = self._get_totalsize_var(nonvarsize, itemsize, length) + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) (result + size_gc_header + offset_to_length).signed[0] = length - self.free = result + 
llarena.round_up_for_allocation(totalsize) - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return self._setup_object(result, typeid16, False) - def obtain_free_space(self, totalsize): - # a bit of tweaking to maximize the performance and minimize the - # amount of code in an inlined version of malloc_fixedsize_clear() - if not self.try_obtain_free_space(totalsize): - raise memoryError + def obtain_free_space(self, requested_size): + if self.free == NULL: + return self._emergency_initial_block(requested_size) + while True: + executed_some_finalizers = self.markcompactcollect(requested_size) + self.next_collect_after -= requested_size + if self.next_collect_after >= 0: + break # ok + else: + if executed_some_finalizers: + pass # try again to do a collection + else: + raise MemoryError return self.free obtain_free_space._dont_inline_ = True - def try_obtain_free_space(self, needed): - needed = raw_malloc_usage(needed) - while 1: - self.markcompactcollect(needed) - missing = needed - (self.top_of_space - self.free) - if missing < 0: - return True - - def new_space_size(self, occupied, needed): - res = (occupied * FREE_SPACE_MULTIPLIER / - FREE_SPACE_DIVIDER + FREE_SPACE_ADD + needed) - # align it to 4096, which is somewhat around page size - return ((res/4096) + 1) * 4096 - - def double_space_size(self, minimal_size): - while self.space_size <= minimal_size: - self.space_size *= 2 - toaddr = llarena.arena_malloc(self.space_size, True) - return toaddr - - def compute_alive_objects(self): - fromaddr = self.space - addraftercollect = self.space - num = 1 - while fromaddr < self.free: - size_gc_header = self.gcheaderbuilder.size_gc_header - tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid - obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - objtotalsize = size_gc_header + objsize - if self.marked(obj): - copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or - ((tid & GCFLAG_HASHTAKEN) != 0 and - addraftercollect < fromaddr)) - addraftercollect += raw_malloc_usage(objtotalsize) - if copy_has_hash_field: - addraftercollect += llmemory.sizeof(lltype.Signed) - num += 1 - fromaddr += objtotalsize - if tid & GCFLAG_HASHFIELD: - fromaddr += llmemory.sizeof(lltype.Signed) - ll_assert(addraftercollect <= fromaddr, - "markcompactcollect() is trying to increase memory usage") - self.totalsize_of_objs = addraftercollect - self.space - return num + def _emergency_initial_block(self, requested_size): + # xxx before the GC is fully setup, we might get there. Hopefully + # we will only allocate a couple of strings, e.g. in read_from_env(). + # Just allocate them raw and leak them. 
+ debug_start("gc-initial-block") + debug_print("leaking", requested_size, "bytes") + debug_stop("gc-initial-block") + return llmemory.raw_malloc(requested_size) def collect(self, gen=0): self.markcompactcollect() - - def markcompactcollect(self, needed=0): - start_time = self.debug_collect_start() + + def markcompactcollect(self, requested_size=0): + self.debug_collect_start(requested_size) self.debug_check_consistency() - self.to_see = self.AddressStack() - self.mark_roots_recursively() - if (self.objects_with_finalizers.non_empty() or - self.run_finalizers.non_empty()): - self.mark_objects_with_finalizers() - self._trace_and_mark() + # + # Mark alive objects + # + self.to_see = self.AddressDeque() + self.trace_from_roots() self.to_see.delete() - num_of_alive_objs = self.compute_alive_objects() - size_of_alive_objs = self.totalsize_of_objs - totalsize = self.new_space_size(size_of_alive_objs, needed + - num_of_alive_objs * BYTES_PER_TID) - tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) + - llmemory.sizeof(TID_TYPE) * num_of_alive_objs) - used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size) - if totalsize >= self.space_size or used_space_now >= self.space_size: - toaddr = self.double_space_size(totalsize) - llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - toaddr + size_of_alive_objs, - lltype.Ptr(self.TID_BACKUP)) - resizing = True - else: - toaddr = llarena.arena_new_view(self.space) - llarena.arena_reserve(self.top_of_space, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - self.top_of_space, - lltype.Ptr(self.TID_BACKUP)) - resizing = False - self.next_collect_after = totalsize - weakref_offsets = self.collect_weakref_offsets() - finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs) + # + # Prepare new views on the same memory + # + toaddr = llarena.arena_new_view(self.space) + maxnum = self.space_size - (self.free - self.space) + maxnum /= BYTES_PER_TID + llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum)) + self.tid_backup = llmemory.cast_adr_to_ptr(self.free, + lltype.Ptr(TID_BACKUP)) + # + # Walk all objects and assign forward pointers in the same order, + # also updating all references + # + self.update_forward_pointers(toaddr, maxnum) if (self.run_finalizers.non_empty() or self.objects_with_finalizers.non_empty()): self.update_run_finalizers() - if self.objects_with_weakrefs.non_empty(): - self.invalidate_weakrefs(weakref_offsets) + self.update_objects_with_id() - self.compact(resizing) - if not resizing: - size = toaddr + self.space_size - finaladdr - llarena.arena_reset(finaladdr, size, True) - else: - if we_are_translated(): - # because we free stuff already in raw_memmove, we - # would get double free here. 
Let's free it anyway - llarena.arena_free(self.space) - llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size, - True) - self.space = toaddr - self.free = finaladdr - self.top_of_space = toaddr + self.next_collect_after + self.compact() + # + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.free = self.finaladdr + self.next_collect_after = self.next_collection(self.finaladdr - toaddr, + self.num_alive_objs, + requested_size) + # + if not translated_to_c(): + remaining_size = (toaddr + self.space_size) - self.finaladdr + llarena.arena_reset(self.finaladdr, remaining_size, False) + llarena.arena_free(self.space) + self.space = toaddr + # self.debug_check_consistency() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.debug_collect_finish() + if self.next_collect_after < 0: + raise MemoryError + # if self.run_finalizers.non_empty(): self.execute_finalizers() - self.debug_collect_finish(start_time) - - def collect_weakref_offsets(self): - weakrefs = self.objects_with_weakrefs - new_weakrefs = self.AddressStack() - weakref_offsets = lltype.malloc(self.WEAKREF_OFFSETS, - weakrefs.length(), flavor='raw') - i = 0 - while weakrefs.non_empty(): - obj = weakrefs.pop() - offset = self.weakpointer_offset(self.get_type_id(obj)) - weakref_offsets[i] = offset - new_weakrefs.append(obj) - i += 1 - self.objects_with_weakrefs = new_weakrefs - weakrefs.delete() - return weakref_offsets + return True # executed some finalizers + else: + return False # no finalizer executed - def debug_collect_start(self): - if have_debug_prints(): + def debug_collect_start(self, requested_size): + if 1:# have_debug_prints(): debug_start("gc-collect") debug_print() - debug_print(".----------- Full collection ------------------") - start_time = time.time() - return start_time - return -1 - - def debug_collect_finish(self, start_time): - if start_time != -1: - end_time = time.time() - elapsed_time = end_time - start_time - self.total_collection_time += elapsed_time - self.total_collection_count += 1 - total_program_time = end_time - self.program_start_time - ct = self.total_collection_time - cc = self.total_collection_count - debug_print("| number of collections so far ", - cc) - debug_print("| total collections per second: ", - cc / total_program_time) - debug_print("| total time in markcompact-collect: ", - ct, "seconds") - debug_print("| percentage collection<->total time:", - ct * 100.0 / total_program_time, "%") + debug_print(".----------- Full collection -------------------") + debug_print("| requested size:", + requested_size) + #start_time = time.time() + #return start_time + #return -1 + + def debug_collect_finish(self): + if 1:# start_time != -1: + #end_time = time.time() + #elapsed_time = end_time - start_time + #self.total_collection_time += elapsed_time + #self.total_collection_count += 1 + #total_program_time = end_time - self.program_start_time + #ct = self.total_collection_time + #cc = self.total_collection_count + #debug_print("| number of collections so far ", + # cc) + debug_print("| total space size ", + self.space_size) + debug_print("| number of objects alive ", + self.num_alive_objs) + debug_print("| used space size ", + self.free - self.space) + debug_print("| next collection after ", + self.next_collect_after) + #debug_print("| total collections per second: ", + # cc / total_program_time) + #debug_print("| total time in markcompact-collect: ", + # ct, "seconds") + #debug_print("| percentage collection<->total time:", + # ct * 100.0 / total_program_time, "%") 
debug_print("`----------------------------------------------") debug_stop("gc-collect") def update_run_finalizers(self): - run_finalizers = self.AddressDeque() - while self.run_finalizers.non_empty(): - obj = self.run_finalizers.popleft() - run_finalizers.append(self.get_forwarding_address(obj)) - self.run_finalizers.delete() - self.run_finalizers = run_finalizers + if self.run_finalizers.non_empty(): # uncommon case + run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + obj = self.run_finalizers.popleft() + run_finalizers.append(self.get_forwarding_address(obj)) + self.run_finalizers.delete() + self.run_finalizers = run_finalizers + # objects_with_finalizers = self.AddressDeque() while self.objects_with_finalizers.non_empty(): obj = self.objects_with_finalizers.popleft() @@ -353,90 +360,156 @@ tid = self.header(addr).tid return llop.extract_ushort(llgroup.HALFWORD, tid) - def mark_roots_recursively(self): + def trace_from_roots(self): self.root_walker.walk_roots( - MarkCompactGC._mark_root_recursively, # stack roots - MarkCompactGC._mark_root_recursively, # static in prebuilt non-gc structures - MarkCompactGC._mark_root_recursively) # static in prebuilt gc objects + MarkCompactGC._mark_root, # stack roots + MarkCompactGC._mark_root, # static in prebuilt non-gc structures + MarkCompactGC._mark_root) # static in prebuilt gc objects + if (self.objects_with_finalizers.non_empty() or + self.run_finalizers.non_empty()): + self.trace_from_objects_with_finalizers() self._trace_and_mark() def _trace_and_mark(self): - # XXX depth-first tracing... it can consume a lot of rawmalloced - # memory for very long stacks in some cases while self.to_see.non_empty(): - obj = self.to_see.pop() + obj = self.to_see.popleft() self.trace(obj, self._mark_obj, None) def _mark_obj(self, pointer, ignored): - obj = pointer.address[0] - if self.marked(obj): - return - self.mark(obj) - self.to_see.append(obj) + self.mark(pointer.address[0]) - def _mark_root_recursively(self, root): + def _mark_root(self, root): self.mark(root.address[0]) - self.to_see.append(root.address[0]) def mark(self, obj): - self.header(obj).tid |= GCFLAG_MARKBIT + if not self.marked(obj): + self.header(obj).tid |= GCFLAG_MARKBIT + self.to_see.append(obj) def marked(self, obj): - return self.header(obj).tid & GCFLAG_MARKBIT + # should work both if tid contains a CombinedSymbolic (for dying + # objects, at this point), or a plain integer. 
+ return MovingGCBase.header(self, obj).tid & GCFLAG_MARKBIT + + def toaddr_smaller_than_fromaddr(self, toaddr, fromaddr): + if translated_to_c(): + return toaddr < fromaddr + else: + # convert the addresses to integers, because they are + # theoretically not from the same arena + return toaddr - self.base_forwarding_addr < fromaddr - self.space - def update_forward_pointers(self, toaddr, num_of_alive_objs): - self.base_forwarding_addr = toaddr + def update_forward_pointers(self, toaddr, maxnum): + self.base_forwarding_addr = base_forwarding_addr = toaddr fromaddr = self.space size_gc_header = self.gcheaderbuilder.size_gc_header - i = 0 + num = 0 while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - totalsize = size_gc_header + objsize - if not self.marked(obj): - self.set_null_forwarding_address(obj, i) - else: - llarena.arena_reserve(toaddr, totalsize) - self.set_forwarding_address(obj, toaddr, i) - toaddr += totalsize - i += 1 - fromaddr += totalsize + # compute the original object size, including the + # optional hash field + basesize = size_gc_header + self.get_size(obj) + totalsrcsize = basesize + if hdr.tid & GCFLAG_HASHFIELD: # already a hash field, copy it too + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + if self.marked(obj): + # the object is marked as suriving. Compute the new object + # size + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD) == + GCFLAG_HASHTAKEN): + # grow a new hash field -- with the exception: if + # the object actually doesn't move, don't + # (otherwise, we get a bogus toaddr > fromaddr) + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + totaldstsize += llmemory.sizeof(lltype.Signed) + # + if not translated_to_c(): + llarena.arena_reserve(toaddr, basesize) + if (raw_malloc_usage(totaldstsize) > + raw_malloc_usage(basesize)): + llarena.arena_reserve(toaddr + basesize, + llmemory.sizeof(lltype.Signed)) + # + # save the field hdr.tid in the array tid_backup + ll_assert(num < maxnum, "overflow of the tid_backup table") + self.tid_backup[num] = self.get_type_id(obj) + num += 1 + # compute forward_offset, the offset to the future copy + # of this object + forward_offset = toaddr - base_forwarding_addr + # copy the first two gc flags in forward_offset + ll_assert(forward_offset & 3 == 0, "misalignment!") + forward_offset |= (hdr.tid >> first_gcflag_bit) & 3 + hdr.tid = forward_offset | GCFLAG_MARKBIT + ll_assert(self.marked(obj), "re-marking object failed!") + # done + toaddr += totaldstsize + # + fromaddr += totalsrcsize + if not translated_to_c(): + assert toaddr - base_forwarding_addr <= fromaddr - self.space + self.num_alive_objs = num + self.finaladdr = toaddr # now update references self.root_walker.walk_roots( - MarkCompactGC._update_root, # stack roots - MarkCompactGC._update_root, # static in prebuilt non-gc structures - MarkCompactGC._update_root) # static in prebuilt gc objects + MarkCompactGC._update_ref, # stack roots + MarkCompactGC._update_ref, # static in prebuilt non-gc structures + MarkCompactGC._update_ref) # static in prebuilt gc objects + self.walk_marked_objects(MarkCompactGC.trace_and_update_ref) + + def walk_marked_objects(self, callback): + num = 0 + size_gc_header = self.gcheaderbuilder.size_gc_header fromaddr = self.space - i = 0 + toaddr = self.base_forwarding_addr while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - 
objsize = self.get_size_from_backup(obj, i) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - pass + survives = self.marked(obj) + if survives: + typeid = self.get_typeid_from_backup(num) + num += 1 else: - self.trace_with_backup(obj, self._update_ref, i) - fromaddr += totalsize - i += 1 - return toaddr + typeid = self.get_type_id(obj) + baseobjsize = self._get_size_for_typeid(obj, typeid) + basesize = size_gc_header + baseobjsize + totalsrcsize = basesize + # + if survives: + grow_hash_field = False + if hdr.tid & GCFLAG_SAVED_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_SAVED_HASHTAKEN|GCFLAG_SAVED_HASHFIELD) + == GCFLAG_SAVED_HASHTAKEN): + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + grow_hash_field = True + totaldstsize += llmemory.sizeof(lltype.Signed) + callback(self, obj, typeid, basesize, toaddr, grow_hash_field) + toaddr += totaldstsize + else: + if hdr.tid & GCFLAG_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + fromaddr += totalsrcsize + walk_marked_objects._annspecialcase_ = 'specialize:arg(1)' - def trace_with_backup(self, obj, callback, arg): + def trace_and_update_ref(self, obj, typeid, _1, _2, _3): """Enumerate the locations inside the given obj that can contain GC pointers. For each such location, callback(pointer, arg) is called, where 'pointer' is an address inside the object. Typically, 'callback' is a bound method and 'arg' can be None. """ - typeid = self.get_typeid_from_backup(arg) if self.is_gcarrayofgcptr(typeid): # a performance shortcut for GcArray(gcptr) length = (obj + llmemory.gcarrayofptr_lengthoffset).signed[0] item = obj + llmemory.gcarrayofptr_itemsoffset while length > 0: - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) item += llmemory.gcarrayofptr_singleitemoffset length -= 1 return @@ -444,8 +517,7 @@ i = 0 while i < len(offsets): item = obj + offsets[i] - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) i += 1 if self.has_gcptr_in_varsize(typeid): item = obj + self.varsize_offset_to_variable_part(typeid) @@ -456,171 +528,122 @@ j = 0 while j < len(offsets): itemobj = item + offsets[j] - if self.points_to_valid_gc_object(itemobj): - callback(itemobj, arg) + self._update_ref(itemobj) j += 1 item += itemlength length -= 1 - trace_with_backup._annspecialcase_ = 'specialize:arg(2)' - - def _update_root(self, pointer): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) - - def _update_ref(self, pointer, ignore): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) + else: + weakofs = self.weakpointer_offset(typeid) + if weakofs >= 0: + self._update_weakref(obj + weakofs) + + def _update_ref(self, pointer): + if self.points_to_valid_gc_object(pointer): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + + def _update_weakref(self, pointer): + # either update the weak pointer's destination, or + # if it dies, write a NULL + if self.points_to_valid_gc_object(pointer): + if self.marked(pointer.address[0]): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + else: + pointer.address[0] = NULL def _is_external(self, obj): - return not (self.space <= obj < self.top_of_space) + return not (self.space <= obj < self.free) def get_forwarding_address(self, obj): if self._is_external(obj): return obj return 
self.get_header_forwarded_addr(obj) - def set_null_forwarding_address(self, obj, num): - self.backup_typeid(num, obj) - hdr = self.header(obj) - hdr.tid = -1 # make the object forwarded to NULL - - def set_forwarding_address(self, obj, newobjhdr, num): - self.backup_typeid(num, obj) - forward_offset = newobjhdr - self.base_forwarding_addr - hdr = self.header(obj) - hdr.tid = forward_offset # make the object forwarded to newobj - - def restore_normal_header(self, obj, num): - # Reverse of set_forwarding_address(). - typeid16 = self.get_typeid_from_backup(num) - hdr = self.header_forwarded(obj) - hdr.tid = self.combine(typeid16, 0) # restore the normal header - def get_header_forwarded_addr(self, obj): - return (self.base_forwarding_addr + - self.header_forwarded(obj).tid + - self.gcheaderbuilder.size_gc_header) + tid = self.header_forwarded(obj).tid + ll_assert(tid & GCFLAG_MARKBIT != 0, "dying object is not forwarded") + GCFLAG_MASK = ~(GCFLAG_MARKBIT | 3) + res = (self.base_forwarding_addr + (tid & GCFLAG_MASK) + + self.gcheaderbuilder.size_gc_header) + ll_assert(res < self.finaladdr, "forwarded address >= self.finaladdr") + return res def surviving(self, obj): - return self._is_external(obj) or self.header_forwarded(obj).tid != -1 - - def backup_typeid(self, num, obj): - self.tid_backup[num] = self.get_type_id(obj) + return self.marked(obj) def get_typeid_from_backup(self, num): return self.tid_backup[num] - def get_size_from_backup(self, obj, num): - typeid = self.get_typeid_from_backup(num) - size = self.fixed_size(typeid) - if self.is_varsize(typeid): - lenaddr = obj + self.varsize_offset_to_length(typeid) - length = lenaddr.signed[0] - size += length * self.varsize_item_sizes(typeid) - size = llarena.round_up_for_allocation(size) - # XXX maybe we should parametrize round_up_for_allocation() - # per GC; if we do, we also need to fix the call in - # gctypelayout.encode_type_shape() - return size + def compact(self): + self.walk_marked_objects(MarkCompactGC.copy_and_compact) - def compact(self, resizing): - fromaddr = self.space - size_gc_header = self.gcheaderbuilder.size_gc_header - start = fromaddr - end = fromaddr - num = 0 - while fromaddr < self.free: - obj = fromaddr + size_gc_header - objsize = self.get_size_from_backup(obj, num) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - # this object dies. Following line is a noop in C, - # we clear it to make debugging easier - llarena.arena_reset(fromaddr, totalsize, False) - else: - if resizing: - end = fromaddr - forward_obj = self.get_header_forwarded_addr(obj) - self.restore_normal_header(obj, num) - if obj != forward_obj: - #llop.debug_print(lltype.Void, "Copying from to", - # fromaddr, forward_ptr, totalsize) - llmemory.raw_memmove(fromaddr, - forward_obj - size_gc_header, - totalsize) - if resizing and end - start > GC_CLEARANCE: - diff = end - start - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - diff = (diff / GC_CLEARANCE) * GC_CLEARANCE - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - end = start + diff - if we_are_translated(): - # XXX wuaaaaa.... 
those objects are freed incorrectly - # here in case of test_gc - llarena.arena_reset(start, diff, True) - start += diff - num += 1 - fromaddr += totalsize + def copy_and_compact(self, obj, typeid, basesize, toaddr, grow_hash_field): + # 'basesize' is the size without any hash field + # restore the normal header + hdr = self.header_forwarded(obj) + gcflags = hdr.tid & 3 + if grow_hash_field: + gcflags |= GCFLAG_SAVED_HASHFIELD + hashvalue = self.get_identityhash_from_addr(obj) + elif gcflags & GCFLAG_SAVED_HASHFIELD: + fromaddr = llarena.getfakearenaaddress(obj) + fromaddr -= self.gcheaderbuilder.size_gc_header + hashvalue = (fromaddr + basesize).signed[0] + else: + hashvalue = 0 # not used + # + hdr.tid = self.combine(typeid, gcflags << first_gcflag_bit) + # + fromaddr = obj - self.gcheaderbuilder.size_gc_header + if translated_to_c(): + llmemory.raw_memmove(fromaddr, toaddr, basesize) + else: + llmemory.raw_memcopy(fromaddr, toaddr, basesize) + # + if gcflags & GCFLAG_SAVED_HASHFIELD: + (toaddr + basesize).signed[0] = hashvalue def debug_check_object(self, obj): - # not sure what to check here - pass - - def mark_objects_with_finalizers(self): + type_id = self.get_type_id(obj) + self.has_gcptr_in_varsize(type_id) # checks that the type_id is valid + # + tid = self.header(obj).tid + if self._is_external(obj): + # All external objects have GCFLAG_MARKBIT and GCFLAG_HASHTAKEN + # set. + assert tid & GCFLAG_MARKBIT + assert tid & GCFLAG_HASHTAKEN + else: + # Non-external objects have GCFLAG_MARKBIT that should not be set + # at the very start or at the very end of a collection -- only + # temporarily during the collection. + assert tid & GCFLAG_MARKBIT == 0 + + def trace_from_objects_with_finalizers(self): + if self.run_finalizers.non_empty(): # uncommon case + new_run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + x = self.run_finalizers.popleft() + self.mark(x) + new_run_finalizers.append(x) + self.run_finalizers.delete() + self.run_finalizers = new_run_finalizers + # + # xxx we get to run the finalizers in a random order + self._trace_and_mark() new_with_finalizers = self.AddressDeque() - run_finalizers = self.run_finalizers - new_run_finalizers = self.AddressDeque() - while run_finalizers.non_empty(): - x = run_finalizers.popleft() - self.mark(x) - self.to_see.append(x) - new_run_finalizers.append(x) - run_finalizers.delete() - self.run_finalizers = new_run_finalizers while self.objects_with_finalizers.non_empty(): x = self.objects_with_finalizers.popleft() if self.marked(x): new_with_finalizers.append(x) else: - new_run_finalizers.append(x) + self.run_finalizers.append(x) self.mark(x) - self.to_see.append(x) + self._trace_and_mark() self.objects_with_finalizers.delete() self.objects_with_finalizers = new_with_finalizers - def invalidate_weakrefs(self, weakref_offsets): - # walk over list of objects that contain weakrefs - # if the object it references survives then update the weakref - # otherwise invalidate the weakref - new_with_weakref = self.AddressStack() - i = 0 - while self.objects_with_weakrefs.non_empty(): - obj = self.objects_with_weakrefs.pop() - if not self.surviving(obj): - continue # weakref itself dies - newobj = self.get_forwarding_address(obj) - offset = weakref_offsets[i] - pointing_to = (obj + offset).address[0] - # XXX I think that pointing_to cannot be NULL here - if pointing_to: - if self.surviving(pointing_to): - (obj + offset).address[0] = self.get_forwarding_address( - pointing_to) - new_with_weakref.append(newobj) - else: - (obj + 
offset).address[0] = NULL - i += 1 - self.objects_with_weakrefs.delete() - self.objects_with_weakrefs = new_with_weakref - lltype.free(weakref_offsets, flavor='raw') - - def get_size_incl_hash(self, obj): - size = self.get_size(obj) - hdr = self.header(obj) - if hdr.tid & GCFLAG_HASHFIELD: - size += llmemory.sizeof(lltype.Signed) - return size - def identityhash(self, gcobj): # Unlike SemiSpaceGC.identityhash(), this function does not have # to care about reducing top_of_space. The reason is as @@ -635,8 +658,23 @@ hdr = self.header(obj) # if hdr.tid & GCFLAG_HASHFIELD: # the hash is in a field at the end - obj += self.get_size(obj) + obj = llarena.getfakearenaaddress(obj) + self.get_size(obj) return obj.signed[0] # hdr.tid |= GCFLAG_HASHTAKEN - return llmemory.cast_adr_to_int(obj) # direct case + return self.get_identityhash_from_addr(obj) + + def get_identityhash_from_addr(self, obj): + if translated_to_c(): + return llmemory.cast_adr_to_int(obj) # direct case + else: + try: + adr = llarena.getfakearenaaddress(obj) # -> arena address + except RuntimeError: + return llmemory.cast_adr_to_int(obj) # not in an arena... + return adr - self.space + +# ____________________________________________________________ + +class CannotAllocateGCArena(Exception): + pass Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py Fri Sep 3 13:39:11 2010 @@ -67,7 +67,10 @@ from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True).translation self.stackroots = [] - self.gc = self.GCClass(config, **self.GC_PARAMS) + GC_PARAMS = self.GC_PARAMS.copy() + if hasattr(meth, 'GC_PARAMS'): + GC_PARAMS.update(meth.GC_PARAMS) + self.gc = self.GCClass(config, **GC_PARAMS) self.gc.DEBUG = True self.rootwalker = DirectRootWalker(self) self.gc.set_root_walker(self.rootwalker) @@ -96,7 +99,7 @@ p[index] = newvalue def malloc(self, TYPE, n=None): - addr = self.gc.malloc(self.get_type_id(TYPE), n) + addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) def test_simple(self): @@ -311,7 +314,18 @@ print hash assert isinstance(hash, (int, long)) assert hash == self.gc.identityhash(p_const) - + # (5) p is actually moving (for the markcompact gc) + p0 = self.malloc(S) + self.stackroots.append(p0) + p = self.malloc(S) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.stackroots.pop(-2) + self.gc.collect() # p0 goes away, p shifts left + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -431,3 +445,14 @@ class TestMarkCompactGC(DirectGCTest): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + def test_many_objects(self): + DirectGCTest.test_many_objects(self) + test_many_objects.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + def test_varsized_from_stack(self): + DirectGCTest.test_varsized_from_stack(self) + test_varsized_from_stack.GC_PARAMS = {'space_size': 2 * 1024 * WORD} + + def test_varsized_from_prebuilt_gc(self): + DirectGCTest.test_varsized_from_prebuilt_gc(self) + test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} Modified: 
pypy/trunk/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/trunk/pypy/rpython/memory/gctypelayout.py Fri Sep 3 13:39:11 2010 @@ -44,16 +44,18 @@ self.type_info_group_ptr = type_info_group._as_ptr() def get(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info(res) + return res def get_varsize(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info_varsize(res) + return res def q_is_varsize(self, typeid): infobits = self.get(typeid).infobits @@ -115,15 +117,21 @@ # the lowest 16bits are used to store group member index -T_MEMBER_INDEX = 0xffff +T_MEMBER_INDEX = 0xffff T_IS_VARSIZE = 0x10000 T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_KEY_MASK = 0xFF000000 +T_KEY_VALUE = 0x7A000000 # bug detection only -def _check_typeid(typeid): - ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid), - "invalid type_id") +def _check_valid_type_info(p): + ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id") + +def _check_valid_type_info_varsize(p): + ll_assert(p.header.infobits & (T_KEY_MASK | T_IS_VARSIZE) == + (T_KEY_VALUE | T_IS_VARSIZE), + "invalid varsize type_id") def encode_type_shape(builder, info, TYPE, index): @@ -167,7 +175,7 @@ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if TYPE == WEAKREF: infobits |= T_IS_WEAKREF - info.infobits = infobits + info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -250,14 +258,18 @@ _, TYPE = TYPE._first_struct() def get_info(self, type_id): - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info(res) + return res def get_info_varsize(self, type_id): - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info_varsize(res) + return res def is_weakref(self, type_id): return self.get_info(type_id).infobits & T_IS_WEAKREF Modified: pypy/trunk/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/trunk/pypy/rpython/memory/gcwrapper.py Fri Sep 3 13:39:11 2010 @@ -119,6 +119,9 @@ else: return True + def pyobjectptr(self, klass): + raise NotImplementedError(klass) + # ____________________________________________________________ class LLInterpRootWalker: Modified: pypy/trunk/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gc.py Fri Sep 3 13:39:11 2010 @@ -639,12 +639,14 @@ class TestMarkCompactGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = 
{'space_size': 65536+16384} + GC_CAN_SHRINK_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - -class TestMarkCompactGCGrowing(TestMarkCompactGC): - GC_PARAMS = {'space_size': 16*WORD} + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Fri Sep 3 13:39:11 2010 @@ -1138,15 +1138,16 @@ class TestMarkCompactGC(GenericMovingGCTests): gcname = 'markcompact' - def setup_class(cls): - py.test.skip("Disabled for now, sorry") - class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass - GC_PARAMS = {'space_size': 512*WORD} + GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") + class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1536,3 +1537,12 @@ GC_PARAMS = {'space_size': 512*WORD, 'nursery_size': 32*WORD} root_stack_depth = 200 + +class TestMarkCompactTaggedpointerGC(TaggedPointerGCTests): + gcname = 'markcompact' + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = {'space_size': 4096*WORD} + root_stack_depth = 200 Modified: pypy/trunk/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/trunk/pypy/translator/c/test/test_newgc.py (original) +++ pypy/trunk/pypy/translator/c/test/test_newgc.py Fri Sep 3 13:39:11 2010 @@ -67,9 +67,8 @@ if not fullname.startswith('define'): continue keyword = conftest.option.keyword - if keyword: - if keyword.startswith('test_'): - keyword = keyword[len('test_'):] + if keyword.startswith('test_'): + keyword = keyword[len('test_'):] if keyword not in fullname: continue prefix, name = fullname.split('_', 1) @@ -1072,21 +1071,66 @@ should_be_moving = True GC_CAN_SHRINK_ARRAY = False - def setup_class(cls): - py.test.skip("Disabled for now") - def test_gc_set_max_heap_size(self): py.test.skip("not implemented") + def test_gc_heap_stats(self): + py.test.skip("not implemented") + def test_finalizer_order(self): py.test.skip("not implemented") + def define_adding_a_hash(cls): + from pypy.rlib.objectmodel import compute_identity_hash + S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) + S2 = lltype.GcStruct('S2', ('p1', lltype.Ptr(S1)), + ('p2', lltype.Ptr(S1)), + ('p3', lltype.Ptr(S1)), + ('p4', lltype.Ptr(S1)), + ('p5', lltype.Ptr(S1)), + ('p6', lltype.Ptr(S1)), + ('p7', lltype.Ptr(S1)), + ('p8', lltype.Ptr(S1)), + ('p9', lltype.Ptr(S1))) + def g(): + lltype.malloc(S1) # forgotten, will be shifted over + s2 = lltype.malloc(S2) # a big object, overlaps its old position + s2.p1 = lltype.malloc(S1); s2.p1.x = 1010 + s2.p2 = lltype.malloc(S1); s2.p2.x = 1020 + s2.p3 = lltype.malloc(S1); s2.p3.x = 1030 + s2.p4 = lltype.malloc(S1); s2.p4.x = 1040 + s2.p5 = 
lltype.malloc(S1); s2.p5.x = 1050 + s2.p6 = lltype.malloc(S1); s2.p6.x = 1060 + s2.p7 = lltype.malloc(S1); s2.p7.x = 1070 + s2.p8 = lltype.malloc(S1); s2.p8.x = 1080 + s2.p9 = lltype.malloc(S1); s2.p9.x = 1090 + return s2 + def f(): + rgc.collect() + s2 = g() + h2 = compute_identity_hash(s2) + rgc.collect() # shift s2 to the left, but add a hash field + assert s2.p1.x == 1010 + assert s2.p2.x == 1020 + assert s2.p3.x == 1030 + assert s2.p4.x == 1040 + assert s2.p5.x == 1050 + assert s2.p6.x == 1060 + assert s2.p7.x == 1070 + assert s2.p8.x == 1080 + assert s2.p9.x == 1090 + return h2 - compute_identity_hash(s2) + return f + + def test_adding_a_hash(self): + res = self.run("adding_a_hash") + assert res == 0 + # ____________________________________________________________________ -class TestHybridTaggedPointers(TestHybridGC): +class TaggedPointersTest(object): taggedpointers = True - def define_tagged(cls): class Unrelated(object): pass @@ -1129,3 +1173,10 @@ __slots__ = 'smallint' def meth(self, x): return self.smallint + x + 3 + + +class TestHybridTaggedPointers(TaggedPointersTest, TestHybridGC): + pass + +class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): + removetypeptr = True Modified: pypy/trunk/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/trunk/pypy/translator/exceptiontransform.py (original) +++ pypy/trunk/pypy/translator/exceptiontransform.py Fri Sep 3 13:39:11 2010 @@ -197,7 +197,7 @@ for graph in self.translator.graphs: self.create_exception_handling(graph) - def create_exception_handling(self, graph, always_exc_clear=False): + def create_exception_handling(self, graph): """After an exception in a direct_call (or indirect_call), that is not caught by an explicit except statement, we need to reraise the exception. So after this @@ -212,7 +212,6 @@ self.raise_analyzer.analyze_direct_call(graph) graph.exceptiontransformed = self.exc_data_ptr - self.always_exc_clear = always_exc_clear join_blocks(graph) # collect the blocks before changing them n_need_exc_matching_blocks = 0 @@ -455,13 +454,18 @@ block.recloseblock(l0, l) insert_zeroing_op = False - # XXX this is not right. it also inserts zero_gc_pointers_inside - # XXX on a path that malloc_nonmovable returns null, but does not raise - # XXX which might end up with a segfault. But we don't have such gc now - if spaceop.opname == 'malloc' or spaceop.opname == 'malloc_nonmovable': + if spaceop.opname == 'malloc': flavor = spaceop.args[1].value['flavor'] if flavor == 'gc': insert_zeroing_op = True + elif spaceop.opname == 'malloc_nonmovable': + # xxx we cannot insert zero_gc_pointers_inside after + # malloc_nonmovable, because it can return null. For now + # we simply always force the zero=True flag on + # malloc_nonmovable. + c_flags = spaceop.args[1] + c_flags.value = c_flags.value.copy() + spaceop.args[1].value['zero'] = True if insert_zeroing_op: if normalafterblock is None: @@ -479,16 +483,6 @@ [v_result_after], varoftype(lltype.Void))) - if self.always_exc_clear: - # insert code that clears the exception even in the non-exceptional - # case... 
this is a hint for the JIT, but pointless otherwise - if normalafterblock is None: - normalafterblock = insert_empty_block(None, l0) - llops = rtyper.LowLevelOpList(None) - self.gen_setfield('exc_value', self.c_null_evalue, llops) - self.gen_setfield('exc_type', self.c_null_etype, llops) - normalafterblock.operations[:0] = llops - class LLTypeExceptionTransformer(BaseExceptionTransformer): From arigo at codespeak.net Fri Sep 3 13:39:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 13:39:44 +0200 (CEST) Subject: [pypy-svn] r76853 - pypy/branch/markcompact Message-ID: <20100903113944.4C5A3282BE9@codespeak.net> Author: arigo Date: Fri Sep 3 13:39:42 2010 New Revision: 76853 Removed: pypy/branch/markcompact/ Log: Remove merged branch. From arigo at codespeak.net Fri Sep 3 14:01:23 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 14:01:23 +0200 (CEST) Subject: [pypy-svn] r76854 - pypy/build/bot2/pypybuildbot Message-ID: <20100903120123.02F19282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 14:01:22 2010 New Revision: 76854 Modified: pypy/build/bot2/pypybuildbot/master.py Log: Finish the integration of the builder pages from builtbot and the summary pages from summary.py, by adding a link directly from the former to the latter. Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Fri Sep 3 14:01:22 2010 @@ -30,6 +30,47 @@ StatusResourceBuilder.ping = my_ping # Disabled. +# Add a link from the builder page to the summary page +def my_body(self, req): + data = _previous_body(self, req) + MARKER = 'waterfall)' + i = data.find(MARKER) + if i >= 0: + from twisted.web import html + i += len(MARKER) + b = self.builder_status + url = self.path_to_root(req)+"summary?builder="+html.escape(b.getName()) + data = '%s   (view in summary)%s' % ( + data[:i], + url, + data[i:]) + return data +_previous_body = StatusResourceBuilder.body +StatusResourceBuilder.body = my_body +# Done + +# Add a similar link from the build page to the summary page +def my_body_2(self, req): + data = _previous_body_2(self, req) + MARKER = '

SourceStamp' + i = data.find(MARKER) + if i >= 0: + from twisted.web import html + b = self.build_status + ss = b.getSourceStamp() + branch = ss.branch or '' + builder_name = b.getBuilder().getName() + url = (self.path_to_root(req) + + "summary?builder=" + html.escape(builder_name) + + "&branch=" + html.escape(branch)) + data = '%s   (view in summary)\n\n%s'% ( + data[:i], + url, + data[i:]) + return data +_previous_body_2 = StatusResourceBuild.body +StatusResourceBuild.body = my_body_2 + # Picking a random slave is not really what we want; # let's pick the first available one instead. Builder.CHOOSE_SLAVES_RANDOMLY = False From arigo at codespeak.net Fri Sep 3 14:42:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 14:42:52 +0200 (CEST) Subject: [pypy-svn] r76855 - pypy/trunk/lib_pypy/_ctypes Message-ID: <20100903124252.68A0B282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 14:42:50 2010 New Revision: 76855 Modified: pypy/trunk/lib_pypy/_ctypes/array.py pypy/trunk/lib_pypy/_ctypes/function.py pypy/trunk/lib_pypy/_ctypes/primitive.py Log: Kill a few unneeded local imports. Modified: pypy/trunk/lib_pypy/_ctypes/array.py ============================================================================== --- pypy/trunk/lib_pypy/_ctypes/array.py (original) +++ pypy/trunk/lib_pypy/_ctypes/array.py Fri Sep 3 14:42:50 2010 @@ -75,7 +75,7 @@ def _CData_output(self, resarray, base=None, index=-1): # this seems to be a string if we're array of char, surprise! - from ctypes import c_char, c_wchar, c_char_p, c_wchar_p + from ctypes import c_char, c_wchar if self._type_ is c_char: return _rawffi.charp2string(resarray.buffer, self._length_) if self._type_ is c_wchar: Modified: pypy/trunk/lib_pypy/_ctypes/function.py ============================================================================== --- pypy/trunk/lib_pypy/_ctypes/function.py (original) +++ pypy/trunk/lib_pypy/_ctypes/function.py Fri Sep 3 14:42:50 2010 @@ -60,7 +60,6 @@ return self._restype_ def _setrestype(self, restype): self._ptr = None - from ctypes import c_char_p if restype is int: from ctypes import c_int restype = c_int @@ -214,9 +213,7 @@ @staticmethod def _guess_argtypes(args): - from _ctypes import _CData from ctypes import c_char_p, c_wchar_p, c_void_p, c_int - from ctypes import Array, Structure res = [] for arg in args: if hasattr(arg, '_as_parameter_'): Modified: pypy/trunk/lib_pypy/_ctypes/primitive.py ============================================================================== --- pypy/trunk/lib_pypy/_ctypes/primitive.py (original) +++ pypy/trunk/lib_pypy/_ctypes/primitive.py Fri Sep 3 14:42:50 2010 @@ -57,7 +57,6 @@ pyobj_container = GlobalPyobjContainer() def generic_xxx_p_from_param(cls, value): - from _ctypes import Array, _Pointer if value is None: return cls(None) if isinstance(value, basestring): @@ -119,8 +118,6 @@ result._ffiarray = ffiarray if tp == 'z': # c_char_p - from _ctypes import Array, _Pointer - def _getvalue(self): addr = self._buffer[0] if addr == 0: @@ -143,7 +140,7 @@ result.value = property(_getvalue, _setvalue) elif tp == 'Z': # c_wchar_p - from _ctypes import Array, _Pointer, _wstring_at + from _ctypes import _wstring_at def _getvalue(self): addr = self._buffer[0] if addr == 0: From arigo at codespeak.net Fri Sep 3 17:39:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 17:39:50 +0200 (CEST) Subject: [pypy-svn] r76857 - in pypy/trunk/pypy: jit/codewriter objspace/std rpython rpython/lltypesystem rpython/lltypesystem/test 
rpython/memory/test rpython/ootypesystem rpython/test translator/backendopt/test Message-ID: <20100903153950.2ADFE282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 17:39:47 2010 New Revision: 76857 Modified: pypy/trunk/pypy/jit/codewriter/jtransform.py pypy/trunk/pypy/objspace/std/tupleobject.py pypy/trunk/pypy/rpython/lltypesystem/lloperation.py pypy/trunk/pypy/rpython/lltypesystem/lltype.py pypy/trunk/pypy/rpython/lltypesystem/opimpl.py pypy/trunk/pypy/rpython/lltypesystem/test/test_lloperation.py pypy/trunk/pypy/rpython/lltypesystem/test/test_lltype.py pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py pypy/trunk/pypy/rpython/ootypesystem/ootype.py pypy/trunk/pypy/rpython/ootypesystem/rclass.py pypy/trunk/pypy/rpython/rclass.py pypy/trunk/pypy/rpython/test/test_rclass.py pypy/trunk/pypy/translator/backendopt/test/test_constfold.py Log: Merge branch/no-_immutable_. Unlike hinted by the name of the branch, this does not kill the _immutable_ hint; it just makes it saner (and _immutable_fields_ too). Now, the definition in pure Python terms is precisely that an instance x is fully immutable if and only if hasattr(x, '_immutable_'). During translation, it is now an error (reported by rpython/rclass.py) if we find the _immutable_ hint on a class but, on some parent class, we have non-trivial non-read-only attributes. Similarly, it is an error to list a field name in _immutable_fields_ if that field ends up on some parent class. However, it is ok to give a field name that is not actually present in this class. If/when it shows up on some subclass, it will be flagged as read-only there. The idea is again that if we say _immutable_fields_=['x'] on some class A, then any attribute 'x' on any instance of (a subclass of) A will be read-only. Modified: pypy/trunk/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/jtransform.py (original) +++ pypy/trunk/pypy/jit/codewriter/jtransform.py Fri Sep 3 17:39:47 2010 @@ -511,14 +511,11 @@ arraydescr) return [] # check for deepfrozen structures that force constant-folding - hints = v_inst.concretetype.TO._hints - accessor = hints.get("immutable_fields") - if accessor and c_fieldname.value in accessor.fields: + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: pure = '_pure' - if accessor.fields[c_fieldname.value] == "[*]": + if immut == "[*]": self.immutable_arrays[op.result] = True - elif hints.get('immutable'): - pure = '_pure' else: pure = '' argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') Modified: pypy/trunk/pypy/objspace/std/tupleobject.py ============================================================================== --- pypy/trunk/pypy/objspace/std/tupleobject.py (original) +++ pypy/trunk/pypy/objspace/std/tupleobject.py Fri Sep 3 17:39:47 2010 @@ -10,7 +10,7 @@ class W_TupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef - _immutable_ = True + _immutable_fields_ = ['wrappeditems[*]'] def __init__(w_self, wrappeditems): make_sure_not_resized(wrappeditems) Modified: pypy/trunk/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lloperation.py Fri Sep 3 17:39:47 2010 @@ -85,16 +85,20 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return (self.canfold or # canfold => pure operation - 
self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable - (self in (llop.getfield, llop.getarrayitem) and - args_v[0].concretetype.TO._hints.get('immutable')) or - (self is llop.getfield and # reading from immutable_field - 'immutable_fields' in args_v[0].concretetype.TO._hints and - args_v[1].value in args_v[0].concretetype.TO - ._hints['immutable_fields'].fields)) - # XXX: what about ootype immutable arrays? + if self.canfold: # canfold => pure operation + return True + if self is llop.debug_assert: # debug_assert is pure enough + return True + # reading from immutable (lltype) + if self is llop.getfield or self is llop.getarrayitem: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype.TO._immutable_field(field) + # reading from immutable (ootype) (xxx what about arrays?) + if self is llop.oogetfield: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype._immutable_field(field) + # default + return False def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/trunk/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lltype.py Fri Sep 3 17:39:47 2010 @@ -297,6 +297,15 @@ n = 1 return _struct(self, n, initialization='example') + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) + class RttiStruct(Struct): _runtime_type_info = None @@ -391,6 +400,9 @@ def _container_example(self): return _array(self, 1, initialization='example') + def _immutable_field(self, index=None): + return self._hints.get('immutable', False) + class GcArray(Array): _gckind = 'gc' def _inline_is_varsize(self, last): Modified: pypy/trunk/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/opimpl.py Fri Sep 3 17:39:47 2010 @@ -150,12 +150,7 @@ # we can constant-fold this if the innermost structure from which we # read the final field is immutable. 
T = lltype.typeOf(innermostcontainer).TO - if T._hints.get('immutable'): - pass - elif ('immutable_fields' in T._hints and - offsets[-1] in T._hints['immutable_fields'].fields): - pass - else: + if not T._immutable_field(offsets[-1]): raise TypeError("cannot fold getinteriorfield on mutable struct") assert not isinstance(ob, lltype._interior_ptr) return ob @@ -437,19 +432,15 @@ def op_getfield(p, name): checkptr(p) TYPE = lltype.typeOf(p).TO - if TYPE._hints.get('immutable'): - pass - elif ('immutable_fields' in TYPE._hints and - name in TYPE._hints['immutable_fields'].fields): - pass - else: + if not TYPE._immutable_field(name): raise TypeError("cannot fold getfield on mutable struct") return getattr(p, name) def op_getarrayitem(p, index): checkptr(p) - if not lltype.typeOf(p).TO._hints.get('immutable'): - raise TypeError("cannot fold getfield on mutable array") + ARRAY = lltype.typeOf(p).TO + if not ARRAY._immutable_field(index): + raise TypeError("cannot fold getarrayitem on mutable array") return p[index] def _normalize(x): Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_lloperation.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_lloperation.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_lloperation.py Fri Sep 3 17:39:47 2010 @@ -88,7 +88,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) v_s3 = Variable() v_s3.concretetype = lltype.Ptr(S3) assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) @@ -103,7 +103,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_lltype.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_lltype.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_lltype.py Fri Sep 3 17:39:47 2010 @@ -781,6 +781,28 @@ p = cast_opaque_ptr(llmemory.GCREF, a) assert hash1 == identityhash(p) +def test_immutable_hint(): + S = GcStruct('S', ('x', lltype.Signed)) + assert S._immutable_field('x') == False + # + S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' + + class TestTrackAllocation: def setup_method(self, func): start_tracking_allocations() Modified: pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py Fri Sep 3 17:39:47 2010 @@ -101,7 
+101,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) Modified: pypy/trunk/pypy/rpython/ootypesystem/ootype.py ============================================================================== --- pypy/trunk/pypy/rpython/ootypesystem/ootype.py (original) +++ pypy/trunk/pypy/rpython/ootypesystem/ootype.py Fri Sep 3 17:39:47 2010 @@ -267,6 +267,14 @@ return self._fields_with_default[:] return self._superclass._get_fields_with_default() + self._fields_with_default + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) class SpecializableType(OOType): Modified: pypy/trunk/pypy/rpython/ootypesystem/rclass.py ============================================================================== --- pypy/trunk/pypy/rpython/ootypesystem/rclass.py (original) +++ pypy/trunk/pypy/rpython/ootypesystem/rclass.py Fri Sep 3 17:39:47 2010 @@ -194,6 +194,7 @@ self.lowleveltype._hints.update(hints) if self.classdef is None: + self.fields = {} self.allfields = {} self.allmethods = {} self.allclassattributes = {} @@ -210,6 +211,7 @@ allclassattributes = {} fields = {} + nonmangledfields = [] fielddefaults = {} if llfields: @@ -224,6 +226,7 @@ allfields[mangled] = repr oot = repr.lowleveltype fields[mangled] = oot + nonmangledfields.append(name) try: value = self.classdef.classdesc.read_attribute(name) fielddefaults[mangled] = repr.convert_desc_or_const(value) @@ -294,6 +297,7 @@ if not attrdef.s_value.is_constant(): classattributes[mangled] = attrdef.s_value, value + self.fields = nonmangledfields self.allfields = allfields self.allmethods = allmethods self.allclassattributes = allclassattributes Modified: pypy/trunk/pypy/rpython/rclass.py ============================================================================== --- pypy/trunk/pypy/rpython/rclass.py (original) +++ pypy/trunk/pypy/rpython/rclass.py Fri Sep 3 17:39:47 2010 @@ -9,6 +9,7 @@ class FieldListAccessor(object): def initialize(self, TYPE, fields): + assert type(fields) is dict self.TYPE = TYPE self.fields = fields @@ -18,6 +19,10 @@ def _freeze_(self): return True +class ImmutableConflictError(Exception): + """Raised when the _immutable_ or _immutable_fields_ hints are + not consistent across a class hierarchy.""" + def getclassrepr(rtyper, classdef): try: @@ -153,7 +158,7 @@ pass def _check_for_immutable_hints(self, hints): - if '_immutable_' in self.classdef.classdesc.classdict: + if self.classdef.classdesc.lookup('_immutable_') is not None: hints = hints.copy() hints['immutable'] = True self.immutable_field_list = [] # unless overwritten below @@ -182,16 +187,20 @@ return 'InstanceR %s' % (clsname,) def _setup_repr_final(self): + self._setup_immutable_field_list() + self._check_for_immutable_conflicts() + + def _setup_immutable_field_list(self): hints = self.object_type._hints if "immutable_fields" in hints: accessor = hints["immutable_fields"] - immutable_fields = {} - rbase = self - while rbase.classdef is not None: - immutable_fields.update( - dict.fromkeys(rbase.immutable_field_list)) - rbase = rbase.rbase - self._parse_field_list(immutable_fields, accessor) + if not hasattr(accessor, 'fields'): + immutable_fields = [] + rbase = self + while rbase.classdef is not None: + 
immutable_fields += rbase.immutable_field_list + rbase = rbase.rbase + self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} @@ -209,6 +218,36 @@ accessor.initialize(self.object_type, with_suffix) return with_suffix + def _check_for_immutable_conflicts(self): + # check for conflicts, i.e. a field that is defined normally as + # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void + is_self_immutable = "immutable" in self.object_type._hints + base = self + while base.classdef is not None: + base = base.rbase + for fieldname in base.fields: + try: + mangled, r = base._get_field(fieldname) + except KeyError: + continue + if r.lowleveltype == Void: + continue + base._setup_immutable_field_list() + if base.object_type._immutable_field(mangled): + continue + # 'fieldname' is a mutable, non-Void field in the parent + if is_self_immutable: + raise ImmutableConflictError( + "class %r has _immutable_=True, but parent class %r " + "defines (at least) the mutable field %r" % ( + self, base, fieldname)) + if fieldname in self.immutable_field_list: + raise ImmutableConflictError( + "field %r is defined mutable in class %r, but " + "listed in _immutable_fields_ in subclass %r" % ( + fieldname, base, self)) + def new_instance(self, llops, classcallhop=None): raise NotImplementedError Modified: pypy/trunk/pypy/rpython/test/test_rclass.py ============================================================================== --- pypy/trunk/pypy/rpython/test/test_rclass.py (original) +++ pypy/trunk/pypy/rpython/test/test_rclass.py Fri Sep 3 17:39:47 2010 @@ -796,27 +796,92 @@ assert accessor.fields == {"inst_y" : ""} or \ accessor.fields == {"oy" : ""} # for ootype - def test_immutable_inheritance(self): - class I(object): - def __init__(self, v): - self.v = v - - class J(I): + def test_immutable_forbidden_inheritance_1(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_fields_ = ['v'] + def f(): + A().v = 123 + B() # crash: class B says 'v' is immutable, + # but it is defined on parent class A + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_forbidden_inheritance_2(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B() # crash: class B has _immutable_ = True + # but class A defines 'v' to be mutable + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_ok_inheritance_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['v'] + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B().w = 456 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + try: + A_TYPE = B_TYPE.super + except AttributeError: + A_TYPE = B_TYPE._superclass # for ootype + accessor = A_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype + + def test_immutable_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_ = True + class B(A): + pass + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] # inherited from A + + def 
test_immutable_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): _immutable_ = True - def __init__(self, v, w): - self.w = w - I.__init__(self, v) - - j = J(3, 4) - def f(): - j.v = j.v * 1 # make the annotator think it is mutated - j.w = j.w * 1 # make the annotator think it is mutated - return j.v + j.w - - t, typer, graph = self.gengraph(f, [], backendopt=True) - f_summary = summary(graph) - assert f_summary == {"setfield": 2} or \ - f_summary == {"oosetfield": 2} # for ootype + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + + def test_immutable_subclass_void(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def myfunc(): + pass + def f(): + A().f = myfunc # it's ok to add Void attributes to A + B().v = 123 # even though only B is declared _immutable_ + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] class TestLLtype(BaseTestRclass, LLRtypeMixin): Modified: pypy/trunk/pypy/translator/backendopt/test/test_constfold.py ============================================================================== --- pypy/trunk/pypy/translator/backendopt/test/test_constfold.py (original) +++ pypy/trunk/pypy/translator/backendopt/test/test_constfold.py Fri Sep 3 17:39:47 2010 @@ -49,7 +49,7 @@ accessor = rclass.FieldListAccessor() S2 = lltype.GcStruct('S2', ('x', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S2, ['x']) + accessor.initialize(S2, {'x': ''}) test_simple(S2) From arigo at codespeak.net Fri Sep 3 17:40:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 17:40:04 +0200 (CEST) Subject: [pypy-svn] r76858 - pypy/branch/no-_immutable_ Message-ID: <20100903154004.6A3EE282BE9@codespeak.net> Author: arigo Date: Fri Sep 3 17:40:02 2010 New Revision: 76858 Removed: pypy/branch/no-_immutable_/ Log: Remove merged branch. From arigo at codespeak.net Fri Sep 3 18:31:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 3 Sep 2010 18:31:09 +0200 (CEST) Subject: [pypy-svn] r76859 - in pypy/trunk/pypy: jit/backend/llsupport rpython/memory Message-ID: <20100903163109.2C649282B9C@codespeak.net> Author: arigo Date: Fri Sep 3 18:31:07 2010 New Revision: 76859 Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py pypy/trunk/pypy/rpython/memory/gctypelayout.py Log: Bug fixes: * reintroduce check_typeid() in gctypelayout.py to help the code in jit/backend/llsupport/gc.py. * fix an order dependency by rewriting is_weakref(). 
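The gctypelayout.py side of this fix is not included in the excerpt, so the exact body of the reintroduced check_typeid() does not appear below. A minimal sketch, assuming it simply restores the sanity check performed by the _check_typeid() that was removed in the gctypelayout.py diff above (helper and import names are taken from that diff, not from the actual r76859 patch):

    from pypy.rpython.lltypesystem import lltype
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.debug import ll_assert

    def check_typeid(typeid):
        # same check as the removed _check_typeid(): a valid type_id must be
        # a nonzero member index into the type_info group
        ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid),
                  "invalid type_id")

The callers added in jit/backend/llsupport/gc.py below invoke it on the type_id extracted from the tid halfword, before any group-member lookup is trusted.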
Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Fri Sep 3 18:31:07 2010 @@ -328,7 +328,7 @@ DEBUG = False # forced to True by x86/test/test_zrpy_gc.py def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import _check_typeid + from pypy.rpython.memory.gctypelayout import check_typeid from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rpython.memory.gctransform import framework GcLLDescription.__init__(self, gcdescr, translator, rtyper) @@ -375,7 +375,7 @@ def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) has_finalizer = bool(tid & (1< Author: wlav Date: Fri Sep 3 20:20:24 2010 New Revision: 76860 Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Log: Initial int array data member access. Modified: pypy/branch/reflex-support/pypy/module/cppyy/converter.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/converter.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/converter.py Fri Sep 3 20:20:24 2010 @@ -193,7 +193,6 @@ # TODO: now what ... ?? AFAICS, w_value is a pure python list, not an array? # byteptr[0] = space.unwrap(space.id(w_value.getslotvalue(2))) - class ShortArrayConverter(ShortPtrConverter): def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) @@ -202,6 +201,39 @@ for i in range(min(self.size*2, value.getlength())): fieldptr[i] = value.getitem(i) +class LongPtrConverter(TypeConverter): + _immutable_ = True + def __init__(self, detail=None): + if detail is None: + import sys + detail = sys.maxint + self.size = detail + + def convert_argument(self, space, w_obj): + assert "not yet implemented" + + def from_memory(self, space, w_obj, offset): + # read access, so no copy needed + fieldptr = self._get_fieldptr(space, w_obj, offset) + longptr = rffi.cast(rffi.LONGP, fieldptr) + w_array = unpack_simple_shape(space, space.wrap('l')) + return w_array.fromaddress(space, longptr, self.size) + + def to_memory(self, space, w_obj, w_value, offset): + # copy only the pointer value + obj = space.interpclass_w(space.findattr(w_obj, space.wrap("_cppinstance"))) + byteptr = rffi.cast(rffi.LONGP, obj.rawobject[offset]) + # TODO: now what ... ?? AFAICS, w_value is a pure python list, not an array? 
+# byteptr[0] = space.unwrap(space.id(w_value.getslotvalue(2))) + +class LongArrayConverter(LongPtrConverter): + def to_memory(self, space, w_obj, w_value, offset): + # copy the full array (uses byte copy for now) + fieldptr = self._get_fieldptr(space, w_obj, offset) + value = w_value.getslotvalue(2) + for i in range(min(self.size*4, value.getlength())): + fieldptr[i] = value.getitem(i) + class InstancePtrConverter(TypeConverter): _immutable_ = True @@ -271,6 +303,8 @@ _converters["unsigned short int*"] = ShortPtrConverter _converters["unsigned short int[]"] = ShortArrayConverter _converters["int"] = LongConverter +_converters["int*"] = LongPtrConverter +_converters["int[]"] = LongArrayConverter _converters["unsigned int"] = LongConverter _converters["long int"] = LongConverter _converters["unsigned long int"] = LongConverter Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Fri Sep 3 20:20:24 2010 @@ -98,7 +98,7 @@ # integer arrays import array a = range(self.N) - atypes = ['h', 'H']#, 'i', 'I', 'l', 'L' ] + atypes = ['h', 'H', 'i']#, 'I', 'l', 'L' ] for j in range(len(atypes)):#names)): b = array.array(atypes[j], a) exec 'c.m_%s_array = b' % names[j] # buffer copies From wlav at codespeak.net Fri Sep 3 20:50:53 2010 From: wlav at codespeak.net (wlav at codespeak.net) Date: Fri, 3 Sep 2010 20:50:53 +0200 (CEST) Subject: [pypy-svn] r76861 - pypy/branch/reflex-support/pypy/module/cppyy/test Message-ID: <20100903185053.6B36D282B9C@codespeak.net> Author: wlav Date: Fri Sep 3 20:50:49 2010 New Revision: 76861 Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Log: Initial unsigned int array data member access. Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Fri Sep 3 20:50:49 2010 @@ -98,7 +98,7 @@ # integer arrays import array a = range(self.N) - atypes = ['h', 'H', 'i']#, 'I', 'l', 'L' ] + atypes = ['h', 'H', 'i', 'I']#, 'l', 'L' ] for j in range(len(atypes)):#names)): b = array.array(atypes[j], a) exec 'c.m_%s_array = b' % names[j] # buffer copies From wlav at codespeak.net Fri Sep 3 20:52:11 2010 From: wlav at codespeak.net (wlav at codespeak.net) Date: Fri, 3 Sep 2010 20:52:11 +0200 (CEST) Subject: [pypy-svn] r76862 - pypy/branch/reflex-support/pypy/module/cppyy/test Message-ID: <20100903185211.0B7E9282B9C@codespeak.net> Author: wlav Date: Fri Sep 3 20:52:08 2010 New Revision: 76862 Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Log: Initial long array data member access. 
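To make the intent of these small test tweaks concrete, here is a hedged sketch of how such an integer array data member is exercised from application code. The attribute name (m_int_array) and the parameter N are assumptions modelled on the m_%s_array pattern used in the test above, not a documented cppyy API:

    import array

    def exercise_int_array(c, N):
        # writing copies the buffer into the C++ object
        # (the LongArrayConverter.to_memory() path added in the converter.py patch)
        buf = array.array('i', range(N))
        c.m_int_array = buf
        # reading goes through from_memory(), which in the patch wraps the C++
        # storage in an array view via fromaddress(), so plain indexing should work
        view = c.m_int_array
        return [view[i] for i in range(N)]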
Modified: pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py ============================================================================== --- pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py (original) +++ pypy/branch/reflex-support/pypy/module/cppyy/test/test_datatypes.py Fri Sep 3 20:52:08 2010 @@ -98,7 +98,7 @@ # integer arrays import array a = range(self.N) - atypes = ['h', 'H', 'i', 'I']#, 'l', 'L' ] + atypes = ['h', 'H', 'i', 'I', 'l']#, 'L' ] for j in range(len(atypes)):#names)): b = array.array(atypes[j], a) exec 'c.m_%s_array = b' % names[j] # buffer copies From arigo at codespeak.net Sat Sep 4 11:44:20 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 4 Sep 2010 11:44:20 +0200 (CEST) Subject: [pypy-svn] r76863 - pypy/trunk/pypy/module/posix/test Message-ID: <20100904094420.900B3282BF1@codespeak.net> Author: arigo Date: Sat Sep 4 11:44:18 2010 New Revision: 76863 Modified: pypy/trunk/pypy/module/posix/test/test_posix2.py Log: Skip this test. The problem was seen on Linux 2.6.31 on tannit. Modified: pypy/trunk/pypy/module/posix/test/test_posix2.py ============================================================================== --- pypy/trunk/pypy/module/posix/test/test_posix2.py (original) +++ pypy/trunk/pypy/module/posix/test/test_posix2.py Sat Sep 4 11:44:18 2010 @@ -324,6 +324,12 @@ if hasattr(__import__(os.name), "openpty"): def test_openpty(self): os = self.posix + g = os.popen("uname -r", "r") + version = g.read() + g.close() + if version.startswith('2.6.31'): + skip("openpty() deadlocks completely on " + "at least some Linux 2.6.31") master_fd, slave_fd = self.posix.openpty() try: assert isinstance(master_fd, int) From hakanardo at codespeak.net Sat Sep 4 12:16:15 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Sat, 4 Sep 2010 12:16:15 +0200 (CEST) Subject: [pypy-svn] r76864 - in pypy/branch/jit-bounds/pypy/jit/metainterp: optimizeopt test Message-ID: <20100904101615.81466282BF1@codespeak.net> Author: hakanardo Date: Sat Sep 4 12:16:13 2010 New Revision: 76864 Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py Log: sparated out virtualization and heap optimizations Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py Sat Sep 4 12:16:13 2010 @@ -1,6 +1,8 @@ from optimizer import Optimizer from rewrite import OptRewrite from intbounds import OptIntBounds +from virtualize import OptVirtualize +from heap import OptHeap def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -10,10 +12,10 @@ """ optimizations = [OptIntBounds(), OptRewrite(), + OptVirtualize(), + OptHeap(), ] - optimizer = Optimizer(metainterp_sd, loop, optimizations) - if virtuals: - optimizer.setup_virtuals_and_constants() + optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) 
optimizer.propagate_all_forward() def optimize_bridge_1(metainterp_sd, bridge): Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py Sat Sep 4 12:16:13 2010 @@ -1,6 +1,267 @@ +from pypy.jit.metainterp.optimizeutil import _findall +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.rlib.objectmodel import we_are_translated + from optimizer import Optimization -class Heap(Optimization): +class CachedArrayItems(object): + def __init__(self): + self.fixed_index_items = {} + self.var_index_item = None + self.var_index_indexvalue = None + + +class OptHeap(Optimization): """Cache repeated heap accesses""" - # FIXME: Move here + def __init__(self): + # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} + self.cached_fields = {} + # cached array items: {descr: CachedArrayItems} + self.cached_arrayitems = {} + # lazily written setfields (at most one per descr): {descr: op} + self.lazy_setfields = {} + self.lazy_setfields_descrs = [] # keys (at least) of previous dict + + def clean_caches(self): + self.cached_fields.clear() + self.cached_arrayitems.clear() + + def cache_field_value(self, descr, value, fieldvalue, write=False): + if write: + # when seeing a setfield, we have to clear the cache for the same + # field on any other structure, just in case they are aliasing + # each other + d = self.cached_fields[descr] = {} + else: + d = self.cached_fields.setdefault(descr, {}) + d[value] = fieldvalue + + def read_cached_field(self, descr, value): + # XXX self.cached_fields and self.lazy_setfields should probably + # be merged somehow + d = self.cached_fields.get(descr, None) + if d is None: + op = self.lazy_setfields.get(descr, None) + if op is None: + return None + return self.getvalue(op.args[1]) + return d.get(value, None) + + def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): + d = self.cached_arrayitems.get(descr, None) + if d is None: + d = self.cached_arrayitems[descr] = {} + cache = d.get(value, None) + if cache is None: + cache = d[value] = CachedArrayItems() + indexbox = self.get_constant_box(indexvalue.box) + if indexbox is not None: + index = indexbox.getint() + if write: + for value, othercache in d.iteritems(): + # fixed index, clean the variable index cache, in case the + # index is the same + othercache.var_index_indexvalue = None + othercache.var_index_item = None + try: + del othercache.fixed_index_items[index] + except KeyError: + pass + cache.fixed_index_items[index] = fieldvalue + else: + if write: + for value, othercache in d.iteritems(): + # variable index, clear all caches for this descr + othercache.var_index_indexvalue = None + othercache.var_index_item = None + othercache.fixed_index_items.clear() + cache.var_index_indexvalue = indexvalue + cache.var_index_item = fieldvalue + + def read_cached_arrayitem(self, descr, value, indexvalue): + d = self.cached_arrayitems.get(descr, None) + if d is None: + return None + cache = d.get(value, None) + if cache is None: + return None + indexbox = self.get_constant_box(indexvalue.box) + if indexbox is not None: + return cache.fixed_index_items.get(indexbox.getint(), None) + elif cache.var_index_indexvalue is indexvalue: + return cache.var_index_item + return None + + def emit_operation(self, op): + self.emitting_operation(op) + 
self.next_optimization.propagate_forward(op) + + def emitting_operation(self, op): + if op.has_no_side_effect(): + return + if op.is_ovf(): + return + if op.is_guard(): + self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() + return + opnum = op.opnum + if (opnum == rop.SETFIELD_GC or + opnum == rop.SETARRAYITEM_GC or + opnum == rop.DEBUG_MERGE_POINT): + return + assert opnum != rop.CALL_PURE + if (opnum == rop.CALL or + opnum == rop.CALL_MAY_FORCE or + opnum == rop.CALL_ASSEMBLER): + if opnum == rop.CALL_ASSEMBLER: + effectinfo = None + else: + effectinfo = op.descr.get_extra_info() + if effectinfo is not None: + # XXX we can get the wrong complexity here, if the lists + # XXX stored on effectinfo are large + for fielddescr in effectinfo.readonly_descrs_fields: + self.force_lazy_setfield(fielddescr) + for fielddescr in effectinfo.write_descrs_fields: + self.force_lazy_setfield(fielddescr) + try: + del self.cached_fields[fielddescr] + except KeyError: + pass + for arraydescr in effectinfo.write_descrs_arrays: + try: + del self.cached_arrayitems[arraydescr] + except KeyError: + pass + if effectinfo.check_forces_virtual_or_virtualizable(): + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + self.force_lazy_setfield(vrefinfo.descr_forced) + # ^^^ we only need to force this field; the other fields + # of virtualref_info and virtualizable_info are not gcptrs. + return + self.force_all_lazy_setfields() + elif op.is_final() or (not we_are_translated() and + op.opnum < 0): # escape() operations + self.force_all_lazy_setfields() + self.clean_caches() + + + def force_lazy_setfield(self, descr, before_guard=False): + try: + op = self.lazy_setfields[descr] + except KeyError: + return + del self.lazy_setfields[descr] + ###self.optimizer._emit_operation(op) + self.next_optimization.propagate_forward(op) + # + # hackish: reverse the order of the last two operations if it makes + # sense to avoid a situation like "int_eq/setfield_gc/guard_true", + # which the backend (at least the x86 backend) does not handle well. + newoperations = self.optimizer.newoperations + if before_guard and len(newoperations) >= 2: + lastop = newoperations[-1] + prevop = newoperations[-2] + # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" + # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" + # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" + opnum = prevop.opnum + if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE + or prevop.is_ovf()) + and prevop.result not in lastop.args): + newoperations[-2] = lastop + newoperations[-1] = prevop + + def force_all_lazy_setfields(self): + if len(self.lazy_setfields_descrs) > 0: + for descr in self.lazy_setfields_descrs: + self.force_lazy_setfield(descr) + del self.lazy_setfields_descrs[:] + + def force_lazy_setfields_for_guard(self): + pendingfields = [] + for descr in self.lazy_setfields_descrs: + try: + op = self.lazy_setfields[descr] + except KeyError: + continue + # the only really interesting case that we need to handle in the + # guards' resume data is that of a virtual object that is stored + # into a field of a non-virtual object. 
+ value = self.getvalue(op.args[0]) + assert not value.is_virtual() # it must be a non-virtual + fieldvalue = self.getvalue(op.args[1]) + if fieldvalue.is_virtual(): + # this is the case that we leave to resume.py + pendingfields.append((descr, value.box, + fieldvalue.get_key_box())) + else: + self.force_lazy_setfield(descr, before_guard=True) + return pendingfields + + def force_lazy_setfield_if_necessary(self, op, value, write=False): + try: + op1 = self.lazy_setfields[op.descr] + except KeyError: + if write: + self.lazy_setfields_descrs.append(op.descr) + else: + if self.getvalue(op1.args[0]) is not value: + self.force_lazy_setfield(op.descr) + + def optimize_GETFIELD_GC(self, op): + value = self.getvalue(op.args[0]) + self.force_lazy_setfield_if_necessary(op, value) + # check if the field was read from another getfield_gc just before + # or has been written to recently + fieldvalue = self.read_cached_field(op.descr, value) + if fieldvalue is not None: + self.make_equal_to(op.result, fieldvalue) + return + # default case: produce the operation + value.ensure_nonnull() + ###self.optimizer.optimize_default(op) + self.emit_operation(op) # FIXME: These might need constant propagation? + # then remember the result of reading the field + fieldvalue = self.getvalue(op.result) + self.cache_field_value(op.descr, value, fieldvalue) + + def optimize_SETFIELD_GC(self, op): + value = self.getvalue(op.args[0]) + fieldvalue = self.getvalue(op.args[1]) + self.force_lazy_setfield_if_necessary(op, value, write=True) + self.lazy_setfields[op.descr] = op + # remember the result of future reads of the field + self.cache_field_value(op.descr, value, fieldvalue, write=True) + + def optimize_GETARRAYITEM_GC(self, op): + value = self.getvalue(op.args[0]) + indexvalue = self.getvalue(op.args[1]) + fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) + if fieldvalue is not None: + self.make_equal_to(op.result, fieldvalue) + return + ###self.optimizer.optimize_default(op) + self.emit_operation(op) # FIXME: These might need constant propagation? 
+ fieldvalue = self.getvalue(op.result) + self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) + + def optimize_SETARRAYITEM_GC(self, op): + self.emit_operation(op) + value = self.getvalue(op.args[0]) + fieldvalue = self.getvalue(op.args[2]) + indexvalue = self.getvalue(op.args[1]) + self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, + write=True) + + def propagate_forward(self, op): + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.emit_operation(op) + +optimize_ops = _findall(OptHeap, 'optimize_') Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 4 12:16:13 2010 @@ -4,17 +4,11 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp import jitprof from pypy.jit.metainterp.executor import execute_nonspec -from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode -from pypy.jit.metainterp.specnode import AbstractVirtualStructSpecNode -from pypy.jit.metainterp.specnode import VirtualInstanceSpecNode -from pypy.jit.metainterp.specnode import VirtualArraySpecNode -from pypy.jit.metainterp.specnode import VirtualStructSpecNode from pypy.jit.metainterp.optimizeutil import _findall, sort_descrs from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.jit.metainterp.optimizeutil import InvalidLoop, args_dict from pypy.jit.metainterp import resume, compile from pypy.jit.metainterp.typesystem import llhelper, oohelper -from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from intutils import IntBound, IntUnbounded @@ -143,250 +137,6 @@ llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) - -class AbstractVirtualValue(OptValue): - _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo') - box = None - level = LEVEL_NONNULL - _cached_vinfo = None - - def __init__(self, optimizer, keybox, source_op=None): - self.optimizer = optimizer - self.keybox = keybox # only used as a key in dictionaries - self.source_op = source_op # the NEW_WITH_VTABLE/NEW_ARRAY operation - # that builds this box - - def get_key_box(self): - if self.box is None: - return self.keybox - return self.box - - def force_box(self): - if self.box is None: - self.optimizer.forget_numberings(self.keybox) - self._really_force() - return self.box - - def make_virtual_info(self, modifier, fieldnums): - vinfo = self._cached_vinfo - if vinfo is not None and vinfo.equals(fieldnums): - return vinfo - vinfo = self._make_virtual(modifier) - vinfo.set_content(fieldnums) - self._cached_vinfo = vinfo - return vinfo - - def _make_virtual(self, modifier): - raise NotImplementedError("abstract base") - - def _really_force(self): - raise NotImplementedError("abstract base") - -def get_fielddescrlist_cache(cpu): - if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'): - result = descrlist_dict() - cpu._optimizeopt_fielddescrlist_cache = result - return result - return cpu._optimizeopt_fielddescrlist_cache -get_fielddescrlist_cache._annspecialcase_ = "specialize:memo" - -class AbstractVirtualStructValue(AbstractVirtualValue): - _attrs_ = 
('_fields', '_cached_sorted_fields') - - def __init__(self, optimizer, keybox, source_op=None): - AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) - self._fields = {} - self._cached_sorted_fields = None - - def getfield(self, ofs, default): - return self._fields.get(ofs, default) - - def setfield(self, ofs, fieldvalue): - assert isinstance(fieldvalue, OptValue) - self._fields[ofs] = fieldvalue - - def _really_force(self): - assert self.source_op is not None - # ^^^ This case should not occur any more (see test_bug_3). - # - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) - newoperations.append(op) - self._fields = None - - def _get_field_descr_list(self): - _cached_sorted_fields = self._cached_sorted_fields - if (_cached_sorted_fields is not None and - len(self._fields) == len(_cached_sorted_fields)): - lst = self._cached_sorted_fields - else: - lst = self._fields.keys() - sort_descrs(lst) - cache = get_fielddescrlist_cache(self.optimizer.cpu) - result = cache.get(lst, None) - if result is None: - cache[lst] = lst - else: - lst = result - # store on self, to not have to repeatedly get it from the global - # cache, which involves sorting - self._cached_sorted_fields = lst - return lst - - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - # checks for recursion: it is False unless - # we have already seen the very same keybox - lst = self._get_field_descr_list() - fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst] - modifier.register_virtual_fields(self.keybox, fieldboxes) - for ofs in lst: - fieldvalue = self._fields[ofs] - fieldvalue.get_args_for_fail(modifier) - - -class VirtualValue(AbstractVirtualStructValue): - level = LEVEL_KNOWNCLASS - - def __init__(self, optimizer, known_class, keybox, source_op=None): - AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op) - assert isinstance(known_class, Const) - self.known_class = known_class - - def _make_virtual(self, modifier): - fielddescrs = self._get_field_descr_list() - return modifier.make_virtual(self.known_class, fielddescrs) - -class VStructValue(AbstractVirtualStructValue): - - def __init__(self, optimizer, structdescr, keybox, source_op=None): - AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op) - self.structdescr = structdescr - - def _make_virtual(self, modifier): - fielddescrs = self._get_field_descr_list() - return modifier.make_vstruct(self.structdescr, fielddescrs) - -class VArrayValue(AbstractVirtualValue): - - def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): - AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) - self.arraydescr = arraydescr - self.constvalue = optimizer.new_const_item(arraydescr) - self._items = [self.constvalue] * size - - def getlength(self): - return len(self._items) - - def getitem(self, index): - res = self._items[index] - return res - - def setitem(self, index, itemvalue): - assert isinstance(itemvalue, OptValue) - self._items[index] = itemvalue - - def _really_force(self): - assert self.source_op is not 
None - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - for index in range(len(self._items)): - subvalue = self._items[index] - if subvalue is not self.constvalue: - if subvalue.is_null(): - continue - subbox = subvalue.force_box() - op = ResOperation(rop.SETARRAYITEM_GC, - [box, ConstInt(index), subbox], None, - descr=self.arraydescr) - newoperations.append(op) - - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - # checks for recursion: it is False unless - # we have already seen the very same keybox - itemboxes = [] - for itemvalue in self._items: - itemboxes.append(itemvalue.get_key_box()) - modifier.register_virtual_fields(self.keybox, itemboxes) - for itemvalue in self._items: - if itemvalue is not self.constvalue: - itemvalue.get_args_for_fail(modifier) - - def _make_virtual(self, modifier): - return modifier.make_varray(self.arraydescr) - -class __extend__(SpecNode): - def setup_virtual_node(self, optimizer, box, newinputargs): - raise NotImplementedError - def teardown_virtual_node(self, optimizer, value, newexitargs): - raise NotImplementedError - -class __extend__(NotSpecNode): - def setup_virtual_node(self, optimizer, box, newinputargs): - newinputargs.append(box) - def teardown_virtual_node(self, optimizer, value, newexitargs): - newexitargs.append(value.force_box()) - -class __extend__(ConstantSpecNode): - def setup_virtual_node(self, optimizer, box, newinputargs): - optimizer.make_constant(box, self.constbox) - def teardown_virtual_node(self, optimizer, value, newexitargs): - pass - -class __extend__(AbstractVirtualStructSpecNode): - def setup_virtual_node(self, optimizer, box, newinputargs): - vvalue = self._setup_virtual_node_1(optimizer, box) - for ofs, subspecnode in self.fields: - subbox = optimizer.new_box(ofs) - subspecnode.setup_virtual_node(optimizer, subbox, newinputargs) - vvaluefield = optimizer.getvalue(subbox) - vvalue.setfield(ofs, vvaluefield) - def _setup_virtual_node_1(self, optimizer, box): - raise NotImplementedError - def teardown_virtual_node(self, optimizer, value, newexitargs): - assert value.is_virtual() - for ofs, subspecnode in self.fields: - subvalue = value.getfield(ofs, optimizer.new_const(ofs)) - subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs) - -class __extend__(VirtualInstanceSpecNode): - def _setup_virtual_node_1(self, optimizer, box): - return optimizer.make_virtual(self.known_class, box) - -class __extend__(VirtualStructSpecNode): - def _setup_virtual_node_1(self, optimizer, box): - return optimizer.make_vstruct(self.typedescr, box) - -class __extend__(VirtualArraySpecNode): - def setup_virtual_node(self, optimizer, box, newinputargs): - vvalue = optimizer.make_varray(self.arraydescr, len(self.items), box) - for index in range(len(self.items)): - subbox = optimizer.new_box_item(self.arraydescr) - subspecnode = self.items[index] - subspecnode.setup_virtual_node(optimizer, subbox, newinputargs) - vvalueitem = optimizer.getvalue(subbox) - vvalue.setitem(index, vvalueitem) - def teardown_virtual_node(self, optimizer, value, newexitargs): - assert value.is_virtual() - for index in range(len(self.items)): - subvalue = value.getitem(index) - subspecnode = self.items[index] - subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs) - class Optimization(object): def propagate_forward(self, op): raise NotImplementedError @@ -394,6 +144,7 @@ def emit_operation(self, op): 
self.next_optimization.propagate_forward(op) + # FIXME: Move some of these here? def getvalue(self, box): return self.optimizer.getvalue(box) @@ -406,6 +157,21 @@ def make_equal_to(self, box, value): return self.optimizer.make_equal_to(box, value) + def get_constant_box(self, box): + return self.optimizer.get_constant_box(box) + + def new_box(self, fieldofs): + return self.optimizer.new_box(fieldofs) + + def new_const(self, fieldofs): + return self.optimizer.new_const(fieldofs) + + def new_box_item(self, arraydescr): + return self.optimizer.new_box_item(arraydescr) + + def new_const_item(self, arraydescr): + return self.optimizer.new_const_item(arraydescr) + def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op @@ -416,20 +182,23 @@ def skip_nextop(self): self.optimizer.i += 1 + def setup(self, virtuals): + pass + class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=[]): + def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) - self.heap_op_optimizer = HeapOpOptimizer(self) self.bool_boxes = {} self.loop_invariant_results = {} self.pure_operations = args_dict() self.producer = {} + self.pendingfields = [] if len(optimizations) == 0: self.first_optimization = self @@ -440,6 +209,7 @@ optimizations[-1].next_optimization = self for o in optimizations: o.optimizer = self + o.setup(virtuals) def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS) @@ -488,21 +258,6 @@ def make_constant_int(self, box, intvalue): self.make_constant(box, ConstInt(intvalue)) - def make_virtual(self, known_class, box, source_op=None): - vvalue = VirtualValue(self, known_class, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - - def make_varray(self, arraydescr, size, box, source_op=None): - vvalue = VArrayValue(self, arraydescr, size, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - - def make_vstruct(self, structdescr, box, source_op=None): - vvalue = VStructValue(self, structdescr, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - def new_ptr_box(self): return self.cpu.ts.BoxRef() @@ -538,25 +293,13 @@ else: return CVAL_ZERO - # ---------- - - def setup_virtuals_and_constants(self): - inputargs = self.loop.inputargs - specnodes = self.loop.token.specnodes - assert len(inputargs) == len(specnodes) - newinputargs = [] - for i in range(len(inputargs)): - specnodes[i].setup_virtual_node(self, inputargs[i], newinputargs) - self.loop.inputargs = newinputargs - - # ---------- - def propagate_all_forward(self): self.exception_might_have_happened = False self.newoperations = [] self.i = 0 while self.i < len(self.loop.operations): op = self.loop.operations[self.i] + #print "OP: %s" % op self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations @@ -572,10 +315,11 @@ break else: self.optimize_default(op) + #print '\n'.join([str(o) for o in self.newoperations]) + '\n---\n' def emit_operation(self, op): - self.heap_op_optimizer.emitting_operation(op) + ###self.heap_op_optimizer.emitting_operation(op) self._emit_operation(op) def _emit_operation(self, op): @@ -595,11 +339,11 @@ self.newoperations.append(op) def store_final_boxes_in_guard(self, 
op): - pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() + ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() descr = op.descr assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, pendingfields) + newboxes = modifier.finish(self.values, self.pendingfields) if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here compile.giveup() descr.store_final_boxes(op, newboxes) @@ -667,125 +411,11 @@ # otherwise, the operation remains self.emit_operation(op) - def optimize_JUMP(self, op): - orgop = self.loop.operations[-1] - exitargs = [] - target_loop_token = orgop.descr - assert isinstance(target_loop_token, LoopToken) - specnodes = target_loop_token.specnodes - assert len(op.args) == len(specnodes) - for i in range(len(specnodes)): - value = self.getvalue(op.args[i]) - specnodes[i].teardown_virtual_node(self, value, exitargs) - op.args = exitargs[:] - self.emit_operation(op) - - def optimize_guard(self, op, constbox, emit_operation=True): - value = self.getvalue(op.args[0]) - if value.is_constant(): - box = value.box - assert isinstance(box, Const) - if not box.same_constant(constbox): - raise InvalidLoop - return - if emit_operation: - self.emit_operation(op) - value.make_constant(constbox) - - def optimize_GUARD_ISNULL(self, op): - value = self.getvalue(op.args[0]) - if value.is_null(): - return - elif value.is_nonnull(): - raise InvalidLoop - self.emit_operation(op) - value.make_constant(self.cpu.ts.CONST_NULL) - - def optimize_GUARD_NONNULL(self, op): - value = self.getvalue(op.args[0]) - if value.is_nonnull(): - return - elif value.is_null(): - raise InvalidLoop - self.emit_operation(op) - value.make_nonnull(len(self.newoperations) - 1) - - def optimize_GUARD_VALUE(self, op): - value = self.getvalue(op.args[0]) - emit_operation = True - if value.last_guard_index != -1: - # there already has been a guard_nonnull or guard_class or - # guard_nonnull_class on this value, which is rather silly. - # replace the original guard with a guard_value - old_guard_op = self.newoperations[value.last_guard_index] - old_opnum = old_guard_op.opnum - old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.args[0], op.args[1]] - # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, - # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr - assert isinstance(descr, compile.ResumeGuardDescr) - descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(old_guard_op) - emit_operation = False - constbox = op.args[1] - assert isinstance(constbox, Const) - self.optimize_guard(op, constbox, emit_operation) - - def optimize_GUARD_TRUE(self, op): - self.optimize_guard(op, CONST_1) - - def optimize_GUARD_FALSE(self, op): - self.optimize_guard(op, CONST_0) - - def optimize_GUARD_CLASS(self, op): - value = self.getvalue(op.args[0]) - expectedclassbox = op.args[1] - assert isinstance(expectedclassbox, Const) - realclassbox = value.get_constant_class(self.cpu) - if realclassbox is not None: - # the following assert should always be true for now, - # because invalid loops that would fail it are detected - # earlier, in optimizefindnode.py. - assert realclassbox.same_constant(expectedclassbox) - return - emit_operation = True - if value.last_guard_index != -1: - # there already has been a guard_nonnull or guard_class or - # guard_nonnull_class on this value. 
- old_guard_op = self.newoperations[value.last_guard_index] - if old_guard_op.opnum == rop.GUARD_NONNULL: - # it was a guard_nonnull, which we replace with a - # guard_nonnull_class. - old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.args[0], op.args[1]] - # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, - # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr - assert isinstance(descr, compile.ResumeGuardDescr) - descr.guard_opnum = rop.GUARD_NONNULL_CLASS - emit_operation = False - if emit_operation: - self.emit_operation(op) - last_guard_index = len(self.newoperations) - 1 - else: - last_guard_index = value.last_guard_index - value.make_constant_class(expectedclassbox, last_guard_index) - - def optimize_GUARD_NO_EXCEPTION(self, op): - if not self.exception_might_have_happened: - return - self.emit_operation(op) - self.exception_might_have_happened = False - def optimize_GUARD_NO_OVERFLOW(self, op): # otherwise the default optimizer will clear fields, which is unwanted # in this case self.emit_operation(op) - def _optimize_nullness(self, op, box, expect_nonnull): value = self.getvalue(box) if value.is_nonnull(): @@ -838,150 +468,6 @@ def optimize_PTR_EQ(self, op): self._optimize_oois_ooisnot(op, False) - def optimize_VIRTUAL_REF(self, op): - indexbox = op.args[1] - # - # get some constants - vrefinfo = self.metainterp_sd.virtualref_info - c_cls = vrefinfo.jit_virtual_ref_const_class - descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index - # - # Replace the VIRTUAL_REF operation with a virtual structure of type - # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, - # but the point is that doing so does not force the original structure. - op = ResOperation(rop.NEW_WITH_VTABLE, [c_cls], op.result) - vrefvalue = self.make_virtual(c_cls, op.result, op) - tokenbox = BoxInt() - self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) - vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) - - def optimize_VIRTUAL_REF_FINISH(self, op): - # Set the 'forced' field of the virtual_ref. - # In good cases, this is all virtual, so has no effect. - # Otherwise, this forces the real object -- but only now, as - # opposed to much earlier. This is important because the object is - # typically a PyPy PyFrame, and now is the end of its execution, so - # forcing it now does not have catastrophic effects. - vrefinfo = self.metainterp_sd.virtualref_info - # op.args[1] should really never point to null here - # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, - descr = vrefinfo.descr_forced) - self.optimize_SETFIELD_GC(op1) - # - set 'virtual_token' to TOKEN_NONE - args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] - op1 = ResOperation(rop.SETFIELD_GC, args, None, - descr = vrefinfo.descr_virtual_token) - self.optimize_SETFIELD_GC(op1) - # Note that in some cases the virtual in op.args[1] has been forced - # already. This is fine. In that case, and *if* a residual - # CALL_MAY_FORCE suddenly turns out to access it, then it will - # trigger a ResumeGuardForcedDescr.handle_async_forcing() which - # will work too (but just be a little pointless, as the structure - # was already forced). 
- - def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - if value.is_virtual(): - # optimizefindnode should ensure that fieldvalue is found - assert isinstance(value, AbstractVirtualValue) - fieldvalue = value.getfield(op.descr, None) - assert fieldvalue is not None - self.make_equal_to(op.result, fieldvalue) - else: - value.ensure_nonnull() - self.heap_op_optimizer.optimize_GETFIELD_GC(op, value) - - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). - optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC - - def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) - if value.is_virtual(): - value.setfield(op.descr, fieldvalue) - else: - value.ensure_nonnull() - self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) - - def optimize_NEW_WITH_VTABLE(self, op): - self.make_virtual(op.args[0], op.result, op) - - def optimize_NEW(self, op): - self.make_vstruct(op.descr, op.result, op) - - def optimize_NEW_ARRAY(self, op): - sizebox = self.get_constant_box(op.args[0]) - if sizebox is not None: - # if the original 'op' did not have a ConstInt as argument, - # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): - op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.descr) - self.make_varray(op.descr, sizebox.getint(), op.result, op) - else: - self.optimize_default(op) - - def optimize_ARRAYLEN_GC(self, op): - value = self.getvalue(op.args[0]) - if value.is_virtual(): - self.make_constant_int(op.result, value.getlength()) - else: - value.ensure_nonnull() - self.optimize_default(op) - - def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) - if indexbox is not None: - itemvalue = value.getitem(indexbox.getint()) - self.make_equal_to(op.result, itemvalue) - return - value.ensure_nonnull() - self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value) - - # note: the following line does not mean that the two operations are - # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure(). 
- optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC - - def optimize_SETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) - if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) - return - value.ensure_nonnull() - fieldvalue = self.getvalue(op.args[2]) - self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) - - def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) - if (source_value.is_virtual() and source_start_box and dest_start_box - and length and dest_value.is_virtual()): - # XXX optimize the case where dest value is not virtual, - # but we still can avoid a mess - source_start = source_start_box.getint() - dest_start = dest_start_box.getint() - for index in range(length.getint()): - val = source_value.getitem(index + source_start) - dest_value.setitem(index + dest_start, val) - return - if length and length.getint() == 0: - return # 0-length arraycopy - descr = op.args[0] - assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - descr)) - def optimize_INSTANCEOF(self, op): value = self.getvalue(op.args[0]) realclassbox = value.get_constant_class(self.cpu) @@ -1013,254 +499,7 @@ resvalue = self.getvalue(op.result) self.loop_invariant_results[key] = resvalue - def optimize_CALL_PURE(self, op): - for arg in op.args: - if self.get_constant_box(arg) is None: - break - else: - # all constant arguments: constant-fold away - self.make_constant(op.result, op.args[0]) - return - # replace CALL_PURE with just CALL - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - op.descr)) - optimize_ops = _findall(Optimizer, 'optimize_') -class CachedArrayItems(object): - def __init__(self): - self.fixed_index_items = {} - self.var_index_item = None - self.var_index_indexvalue = None - - -class HeapOpOptimizer(object): - def __init__(self, optimizer): - self.optimizer = optimizer - # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} - self.cached_fields = {} - # cached array items: {descr: CachedArrayItems} - self.cached_arrayitems = {} - # lazily written setfields (at most one per descr): {descr: op} - self.lazy_setfields = {} - self.lazy_setfields_descrs = [] # keys (at least) of previous dict - - def clean_caches(self): - self.cached_fields.clear() - self.cached_arrayitems.clear() - - def cache_field_value(self, descr, value, fieldvalue, write=False): - if write: - # when seeing a setfield, we have to clear the cache for the same - # field on any other structure, just in case they are aliasing - # each other - d = self.cached_fields[descr] = {} - else: - d = self.cached_fields.setdefault(descr, {}) - d[value] = fieldvalue - - def read_cached_field(self, descr, value): - # XXX self.cached_fields and self.lazy_setfields should probably - # be merged somehow - d = self.cached_fields.get(descr, None) - if d is None: - op = self.lazy_setfields.get(descr, None) - if op is None: - return None - return self.optimizer.getvalue(op.args[1]) - return d.get(value, None) - - def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): - d = self.cached_arrayitems.get(descr, None) - if d is None: - d = self.cached_arrayitems[descr] = {} - cache = d.get(value, 
None) - if cache is None: - cache = d[value] = CachedArrayItems() - indexbox = self.optimizer.get_constant_box(indexvalue.box) - if indexbox is not None: - index = indexbox.getint() - if write: - for value, othercache in d.iteritems(): - # fixed index, clean the variable index cache, in case the - # index is the same - othercache.var_index_indexvalue = None - othercache.var_index_item = None - try: - del othercache.fixed_index_items[index] - except KeyError: - pass - cache.fixed_index_items[index] = fieldvalue - else: - if write: - for value, othercache in d.iteritems(): - # variable index, clear all caches for this descr - othercache.var_index_indexvalue = None - othercache.var_index_item = None - othercache.fixed_index_items.clear() - cache.var_index_indexvalue = indexvalue - cache.var_index_item = fieldvalue - - def read_cached_arrayitem(self, descr, value, indexvalue): - d = self.cached_arrayitems.get(descr, None) - if d is None: - return None - cache = d.get(value, None) - if cache is None: - return None - indexbox = self.optimizer.get_constant_box(indexvalue.box) - if indexbox is not None: - return cache.fixed_index_items.get(indexbox.getint(), None) - elif cache.var_index_indexvalue is indexvalue: - return cache.var_index_item - return None - - def emitting_operation(self, op): - if op.has_no_side_effect(): - return - if op.is_ovf(): - return - if op.is_guard(): - return - opnum = op.opnum - if (opnum == rop.SETFIELD_GC or - opnum == rop.SETARRAYITEM_GC or - opnum == rop.DEBUG_MERGE_POINT): - return - assert opnum != rop.CALL_PURE - if (opnum == rop.CALL or - opnum == rop.CALL_MAY_FORCE or - opnum == rop.CALL_ASSEMBLER): - if opnum == rop.CALL_ASSEMBLER: - effectinfo = None - else: - effectinfo = op.descr.get_extra_info() - if effectinfo is not None: - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) - for fielddescr in effectinfo.write_descrs_fields: - self.force_lazy_setfield(fielddescr) - try: - del self.cached_fields[fielddescr] - except KeyError: - pass - for arraydescr in effectinfo.write_descrs_arrays: - try: - del self.cached_arrayitems[arraydescr] - except KeyError: - pass - if effectinfo.check_forces_virtual_or_virtualizable(): - vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) - # ^^^ we only need to force this field; the other fields - # of virtualref_info and virtualizable_info are not gcptrs. - return - self.force_all_lazy_setfields() - elif op.is_final() or (not we_are_translated() and - op.opnum < 0): # escape() operations - self.force_all_lazy_setfields() - self.clean_caches() - - def force_lazy_setfield(self, descr, before_guard=False): - try: - op = self.lazy_setfields[descr] - except KeyError: - return - del self.lazy_setfields[descr] - self.optimizer._emit_operation(op) - # - # hackish: reverse the order of the last two operations if it makes - # sense to avoid a situation like "int_eq/setfield_gc/guard_true", - # which the backend (at least the x86 backend) does not handle well. 
- newoperations = self.optimizer.newoperations - if before_guard and len(newoperations) >= 2: - lastop = newoperations[-1] - prevop = newoperations[-2] - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.opnum - if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or prevop.is_ovf()) - and prevop.result not in lastop.args): - newoperations[-2] = lastop - newoperations[-1] = prevop - - def force_all_lazy_setfields(self): - if len(self.lazy_setfields_descrs) > 0: - for descr in self.lazy_setfields_descrs: - self.force_lazy_setfield(descr) - del self.lazy_setfields_descrs[:] - - def force_lazy_setfields_for_guard(self): - pendingfields = [] - for descr in self.lazy_setfields_descrs: - try: - op = self.lazy_setfields[descr] - except KeyError: - continue - # the only really interesting case that we need to handle in the - # guards' resume data is that of a virtual object that is stored - # into a field of a non-virtual object. - value = self.optimizer.getvalue(op.args[0]) - assert not value.is_virtual() # it must be a non-virtual - fieldvalue = self.optimizer.getvalue(op.args[1]) - if fieldvalue.is_virtual(): - # this is the case that we leave to resume.py - pendingfields.append((descr, value.box, - fieldvalue.get_key_box())) - else: - self.force_lazy_setfield(descr, before_guard=True) - return pendingfields - - def force_lazy_setfield_if_necessary(self, op, value, write=False): - try: - op1 = self.lazy_setfields[op.descr] - except KeyError: - if write: - self.lazy_setfields_descrs.append(op.descr) - else: - if self.optimizer.getvalue(op1.args[0]) is not value: - self.force_lazy_setfield(op.descr) - - def optimize_GETFIELD_GC(self, op, value): - self.force_lazy_setfield_if_necessary(op, value) - # check if the field was read from another getfield_gc just before - # or has been written to recently - fieldvalue = self.read_cached_field(op.descr, value) - if fieldvalue is not None: - self.optimizer.make_equal_to(op.result, fieldvalue) - return - # default case: produce the operation - value.ensure_nonnull() - self.optimizer.optimize_default(op) - # then remember the result of reading the field - fieldvalue = self.optimizer.getvalue(op.result) - self.cache_field_value(op.descr, value, fieldvalue) - - def optimize_SETFIELD_GC(self, op, value, fieldvalue): - self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.descr] = op - # remember the result of future reads of the field - self.cache_field_value(op.descr, value, fieldvalue, write=True) - - def optimize_GETARRAYITEM_GC(self, op, value): - indexvalue = self.optimizer.getvalue(op.args[1]) - fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) - if fieldvalue is not None: - self.optimizer.make_equal_to(op.result, fieldvalue) - return - self.optimizer.optimize_default(op) - fieldvalue = self.optimizer.getvalue(op.result) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) - - def optimize_SETARRAYITEM_GC(self, op, value, fieldvalue): - self.optimizer.emit_operation(op) - indexvalue = self.optimizer.getvalue(op.args[1]) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, - write=True) - Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- 
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Sat Sep 4 12:16:13 2010 @@ -1,12 +1,12 @@ -from optimizer import Optimization, CONST_1, CONST_0 +from optimizer import * from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.resoperation import rop, ResOperation class OptRewrite(Optimization): - """Rewrite operations into equvivialent, already executed operations - or constants. + """Rewrite operations into equvivialent, cheeper operations. + This includes already executed operations and constants. """ def propagate_forward(self, op): @@ -127,6 +127,116 @@ else: self.emit_operation(op) + def optimize_CALL_PURE(self, op): + for arg in op.args: + if self.get_constant_box(arg) is None: + break + else: + # all constant arguments: constant-fold away + self.make_constant(op.result, op.args[0]) + return + # replace CALL_PURE with just CALL + self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + op.descr)) + def optimize_guard(self, op, constbox, emit_operation=True): + value = self.getvalue(op.args[0]) + if value.is_constant(): + box = value.box + assert isinstance(box, Const) + if not box.same_constant(constbox): + raise InvalidLoop + return + if emit_operation: + self.emit_operation(op) + value.make_constant(constbox) + + def optimize_GUARD_ISNULL(self, op): + value = self.getvalue(op.args[0]) + if value.is_null(): + return + elif value.is_nonnull(): + raise InvalidLoop + self.emit_operation(op) + value.make_constant(self.optimizer.cpu.ts.CONST_NULL) + + def optimize_GUARD_NONNULL(self, op): + value = self.getvalue(op.args[0]) + if value.is_nonnull(): + return + elif value.is_null(): + raise InvalidLoop + self.emit_operation(op) + value.make_nonnull(len(self.optimizer.newoperations) - 1) + + def optimize_GUARD_VALUE(self, op): + value = self.getvalue(op.args[0]) + emit_operation = True + if value.last_guard_index != -1: + # there already has been a guard_nonnull or guard_class or + # guard_nonnull_class on this value, which is rather silly. + # replace the original guard with a guard_value + old_guard_op = self.optimizer.newoperations[value.last_guard_index] + old_opnum = old_guard_op.opnum + old_guard_op.opnum = rop.GUARD_VALUE + old_guard_op.args = [old_guard_op.args[0], op.args[1]] + # hack hack hack. Change the guard_opnum on + # old_guard_op.descr so that when resuming, + # the operation is not skipped by pyjitpl.py. + descr = old_guard_op.descr + assert isinstance(descr, compile.ResumeGuardDescr) + descr.guard_opnum = rop.GUARD_VALUE + descr.make_a_counter_per_value(old_guard_op) + emit_operation = False + constbox = op.args[1] + assert isinstance(constbox, Const) + self.optimize_guard(op, constbox, emit_operation) + + def optimize_GUARD_TRUE(self, op): + self.optimize_guard(op, CONST_1) + + def optimize_GUARD_FALSE(self, op): + self.optimize_guard(op, CONST_0) + + def optimize_GUARD_CLASS(self, op): + value = self.getvalue(op.args[0]) + expectedclassbox = op.args[1] + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + # the following assert should always be true for now, + # because invalid loops that would fail it are detected + # earlier, in optimizefindnode.py. 
+ assert realclassbox.same_constant(expectedclassbox) + return + emit_operation = True + if value.last_guard_index != -1: + # there already has been a guard_nonnull or guard_class or + # guard_nonnull_class on this value. + old_guard_op = self.optimizer.newoperations[value.last_guard_index] + if old_guard_op.opnum == rop.GUARD_NONNULL: + # it was a guard_nonnull, which we replace with a + # guard_nonnull_class. + old_guard_op.opnum = rop.GUARD_NONNULL_CLASS + old_guard_op.args = [old_guard_op.args[0], op.args[1]] + # hack hack hack. Change the guard_opnum on + # old_guard_op.descr so that when resuming, + # the operation is not skipped by pyjitpl.py. + descr = old_guard_op.descr + assert isinstance(descr, compile.ResumeGuardDescr) + descr.guard_opnum = rop.GUARD_NONNULL_CLASS + emit_operation = False + if emit_operation: + self.emit_operation(op) + last_guard_index = len(self.optimizer.newoperations) - 1 + else: + last_guard_index = value.last_guard_index + value.make_constant_class(expectedclassbox, last_guard_index) + + def optimize_GUARD_NO_EXCEPTION(self, op): + if not self.optimizer.exception_might_have_happened: + return + self.emit_operation(op) + self.optimizer.exception_might_have_happened = False optimize_ops = _findall(OptRewrite, 'optimize_') Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py Sat Sep 4 12:16:13 2010 @@ -1,6 +1,456 @@ -from optimizer import Optimization +from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode +from pypy.jit.metainterp.specnode import AbstractVirtualStructSpecNode +from pypy.jit.metainterp.specnode import VirtualInstanceSpecNode +from pypy.jit.metainterp.specnode import VirtualArraySpecNode +from pypy.jit.metainterp.specnode import VirtualStructSpecNode +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.optimizeutil import _findall +from pypy.rlib.objectmodel import we_are_translated +from optimizer import * -class Virtualize(Optimization): + +class AbstractVirtualValue(OptValue): + _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo') + box = None + level = LEVEL_NONNULL + _cached_vinfo = None + + def __init__(self, optimizer, keybox, source_op=None): + self.optimizer = optimizer + self.keybox = keybox # only used as a key in dictionaries + self.source_op = source_op # the NEW_WITH_VTABLE/NEW_ARRAY operation + # that builds this box + + def get_key_box(self): + if self.box is None: + return self.keybox + return self.box + + def force_box(self): + if self.box is None: + self.optimizer.forget_numberings(self.keybox) + self._really_force() + return self.box + + def make_virtual_info(self, modifier, fieldnums): + vinfo = self._cached_vinfo + if vinfo is not None and vinfo.equals(fieldnums): + return vinfo + vinfo = self._make_virtual(modifier) + vinfo.set_content(fieldnums) + self._cached_vinfo = vinfo + return vinfo + + def _make_virtual(self, modifier): + raise NotImplementedError("abstract base") + + def _really_force(self): + raise NotImplementedError("abstract base") + +def get_fielddescrlist_cache(cpu): + if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'): + result = descrlist_dict() + cpu._optimizeopt_fielddescrlist_cache = result + return result + return cpu._optimizeopt_fielddescrlist_cache 
+get_fielddescrlist_cache._annspecialcase_ = "specialize:memo" + +class AbstractVirtualStructValue(AbstractVirtualValue): + _attrs_ = ('_fields', '_cached_sorted_fields') + + def __init__(self, optimizer, keybox, source_op=None): + AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) + self._fields = {} + self._cached_sorted_fields = None + + def getfield(self, ofs, default): + return self._fields.get(ofs, default) + + def setfield(self, ofs, fieldvalue): + assert isinstance(fieldvalue, OptValue) + self._fields[ofs] = fieldvalue + + def _really_force(self): + assert self.source_op is not None + # ^^^ This case should not occur any more (see test_bug_3). + # + newoperations = self.optimizer.newoperations + newoperations.append(self.source_op) + self.box = box = self.source_op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None + + def _get_field_descr_list(self): + _cached_sorted_fields = self._cached_sorted_fields + if (_cached_sorted_fields is not None and + len(self._fields) == len(_cached_sorted_fields)): + lst = self._cached_sorted_fields + else: + lst = self._fields.keys() + sort_descrs(lst) + cache = get_fielddescrlist_cache(self.optimizer.cpu) + result = cache.get(lst, None) + if result is None: + cache[lst] = lst + else: + lst = result + # store on self, to not have to repeatedly get it from the global + # cache, which involves sorting + self._cached_sorted_fields = lst + return lst + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + # checks for recursion: it is False unless + # we have already seen the very same keybox + lst = self._get_field_descr_list() + fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst] + modifier.register_virtual_fields(self.keybox, fieldboxes) + for ofs in lst: + fieldvalue = self._fields[ofs] + fieldvalue.get_args_for_fail(modifier) + + +class VirtualValue(AbstractVirtualStructValue): + level = LEVEL_KNOWNCLASS + + def __init__(self, optimizer, known_class, keybox, source_op=None): + AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op) + assert isinstance(known_class, Const) + self.known_class = known_class + + def _make_virtual(self, modifier): + fielddescrs = self._get_field_descr_list() + return modifier.make_virtual(self.known_class, fielddescrs) + +class VStructValue(AbstractVirtualStructValue): + + def __init__(self, optimizer, structdescr, keybox, source_op=None): + AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op) + self.structdescr = structdescr + + def _make_virtual(self, modifier): + fielddescrs = self._get_field_descr_list() + return modifier.make_vstruct(self.structdescr, fielddescrs) + +class VArrayValue(AbstractVirtualValue): + + def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): + AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) + self.arraydescr = arraydescr + self.constvalue = optimizer.new_const_item(arraydescr) + self._items = [self.constvalue] * size + + def getlength(self): + return len(self._items) + + def getitem(self, index): + res = self._items[index] + return res + + def setitem(self, index, itemvalue): + 
assert isinstance(itemvalue, OptValue) + self._items[index] = itemvalue + + def _really_force(self): + assert self.source_op is not None + newoperations = self.optimizer.newoperations + newoperations.append(self.source_op) + self.box = box = self.source_op.result + for index in range(len(self._items)): + subvalue = self._items[index] + if subvalue is not self.constvalue: + if subvalue.is_null(): + continue + subbox = subvalue.force_box() + op = ResOperation(rop.SETARRAYITEM_GC, + [box, ConstInt(index), subbox], None, + descr=self.arraydescr) + newoperations.append(op) + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + # checks for recursion: it is False unless + # we have already seen the very same keybox + itemboxes = [] + for itemvalue in self._items: + itemboxes.append(itemvalue.get_key_box()) + modifier.register_virtual_fields(self.keybox, itemboxes) + for itemvalue in self._items: + if itemvalue is not self.constvalue: + itemvalue.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_varray(self.arraydescr) + +class __extend__(SpecNode): + def setup_virtual_node(self, optimizer, box, newinputargs): + raise NotImplementedError + def teardown_virtual_node(self, optimizer, value, newexitargs): + raise NotImplementedError + +class __extend__(NotSpecNode): + def setup_virtual_node(self, optimizer, box, newinputargs): + newinputargs.append(box) + def teardown_virtual_node(self, optimizer, value, newexitargs): + newexitargs.append(value.force_box()) + +class __extend__(ConstantSpecNode): + def setup_virtual_node(self, optimizer, box, newinputargs): + optimizer.make_constant(box, self.constbox) + def teardown_virtual_node(self, optimizer, value, newexitargs): + pass + +class __extend__(AbstractVirtualStructSpecNode): + def setup_virtual_node(self, optimizer, box, newinputargs): + vvalue = self._setup_virtual_node_1(optimizer, box) + for ofs, subspecnode in self.fields: + subbox = optimizer.new_box(ofs) + subspecnode.setup_virtual_node(optimizer, subbox, newinputargs) + vvaluefield = optimizer.getvalue(subbox) + vvalue.setfield(ofs, vvaluefield) + def _setup_virtual_node_1(self, optimizer, box): + raise NotImplementedError + def teardown_virtual_node(self, optimizer, value, newexitargs): + assert value.is_virtual() + for ofs, subspecnode in self.fields: + subvalue = value.getfield(ofs, optimizer.new_const(ofs)) + subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs) + +class __extend__(VirtualInstanceSpecNode): + def _setup_virtual_node_1(self, optimizer, box): + return optimizer.make_virtual(self.known_class, box) + +class __extend__(VirtualStructSpecNode): + def _setup_virtual_node_1(self, optimizer, box): + return optimizer.make_vstruct(self.typedescr, box) + +class __extend__(VirtualArraySpecNode): + def setup_virtual_node(self, optimizer, box, newinputargs): + vvalue = optimizer.make_varray(self.arraydescr, len(self.items), box) + for index in range(len(self.items)): + subbox = optimizer.new_box_item(self.arraydescr) + subspecnode = self.items[index] + subspecnode.setup_virtual_node(optimizer, subbox, newinputargs) + vvalueitem = optimizer.getvalue(subbox) + vvalue.setitem(index, vvalueitem) + def teardown_virtual_node(self, optimizer, value, newexitargs): + assert value.is_virtual() + for index in range(len(self.items)): + subvalue = value.getitem(index) + subspecnode = self.items[index] + subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs) + +class 
OptVirtualize(Optimization): "Virtualize objects until they escape." - # FIXME: Move here - + + def setup(self, virtuals): + if not virtuals: + return + + inputargs = self.optimizer.loop.inputargs + specnodes = self.optimizer.loop.token.specnodes + assert len(inputargs) == len(specnodes) + newinputargs = [] + for i in range(len(inputargs)): + specnodes[i].setup_virtual_node(self, inputargs[i], newinputargs) + self.optimizer.loop.inputargs = newinputargs + + def make_virtual(self, known_class, box, source_op=None): + vvalue = VirtualValue(self.optimizer, known_class, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def make_varray(self, arraydescr, size, box, source_op=None): + vvalue = VArrayValue(self.optimizer, arraydescr, size, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def make_vstruct(self, structdescr, box, source_op=None): + vvalue = VStructValue(self.optimizer, structdescr, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def optimize_JUMP(self, op): + orgop = self.optimizer.loop.operations[-1] + exitargs = [] + target_loop_token = orgop.descr + assert isinstance(target_loop_token, LoopToken) + specnodes = target_loop_token.specnodes + assert len(op.args) == len(specnodes) + for i in range(len(specnodes)): + value = self.getvalue(op.args[i]) + specnodes[i].teardown_virtual_node(self, value, exitargs) + op.args = exitargs[:] + self.emit_operation(op) + + def optimize_VIRTUAL_REF(self, op): + indexbox = op.args[1] + # + # get some constants + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + c_cls = vrefinfo.jit_virtual_ref_const_class + descr_virtual_token = vrefinfo.descr_virtual_token + descr_virtualref_index = vrefinfo.descr_virtualref_index + # + # Replace the VIRTUAL_REF operation with a virtual structure of type + # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, + # but the point is that doing so does not force the original structure. + op = ResOperation(rop.NEW_WITH_VTABLE, [c_cls], op.result) + vrefvalue = self.make_virtual(c_cls, op.result, op) + tokenbox = BoxInt() + self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) + vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) + vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) + + def optimize_VIRTUAL_REF_FINISH(self, op): + # Set the 'forced' field of the virtual_ref. + # In good cases, this is all virtual, so has no effect. + # Otherwise, this forces the real object -- but only now, as + # opposed to much earlier. This is important because the object is + # typically a PyPy PyFrame, and now is the end of its execution, so + # forcing it now does not have catastrophic effects. + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + # op.args[1] should really never point to null here + # - set 'forced' to point to the real object + op1 = ResOperation(rop.SETFIELD_GC, op.args, None, + descr = vrefinfo.descr_forced) + self.optimize_SETFIELD_GC(op1) + # - set 'virtual_token' to TOKEN_NONE + args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] + op1 = ResOperation(rop.SETFIELD_GC, args, None, + descr = vrefinfo.descr_virtual_token) + self.optimize_SETFIELD_GC(op1) + # Note that in some cases the virtual in op.args[1] has been forced + # already. This is fine. 
In that case, and *if* a residual + # CALL_MAY_FORCE suddenly turns out to access it, then it will + # trigger a ResumeGuardForcedDescr.handle_async_forcing() which + # will work too (but just be a little pointless, as the structure + # was already forced). + + def optimize_GETFIELD_GC(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + # optimizefindnode should ensure that fieldvalue is found + assert isinstance(value, AbstractVirtualValue) + fieldvalue = value.getfield(op.descr, None) + assert fieldvalue is not None + self.make_equal_to(op.result, fieldvalue) + else: + value.ensure_nonnull() + ###self.heap_op_optimizer.optimize_GETFIELD_GC(op, value) + self.emit_operation(op) + + # note: the following line does not mean that the two operations are + # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). + optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC + + def optimize_SETFIELD_GC(self, op): + value = self.getvalue(op.args[0]) + fieldvalue = self.getvalue(op.args[1]) + if value.is_virtual(): + value.setfield(op.descr, fieldvalue) + else: + value.ensure_nonnull() + ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) + self.emit_operation(op) + + def optimize_NEW_WITH_VTABLE(self, op): + self.make_virtual(op.args[0], op.result, op) + + def optimize_NEW(self, op): + self.make_vstruct(op.descr, op.result, op) + + def optimize_NEW_ARRAY(self, op): + sizebox = self.get_constant_box(op.args[0]) + if sizebox is not None: + # if the original 'op' did not have a ConstInt as argument, + # build a new one with the ConstInt argument + if not isinstance(op.args[0], ConstInt): + op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, + descr=op.descr) + self.make_varray(op.descr, sizebox.getint(), op.result, op) + else: + ###self.optimize_default(op) + self.emit_operation(op) + + def optimize_ARRAYLEN_GC(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + self.make_constant_int(op.result, value.getlength()) + else: + value.ensure_nonnull() + ###self.optimize_default(op) + self.emit_operation(op) + + def optimize_GETARRAYITEM_GC(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + indexbox = self.get_constant_box(op.args[1]) + if indexbox is not None: + itemvalue = value.getitem(indexbox.getint()) + self.make_equal_to(op.result, itemvalue) + return + value.ensure_nonnull() + ###self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value) + self.emit_operation(op) + + # note: the following line does not mean that the two operations are + # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure(). 
+ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC + + def optimize_SETARRAYITEM_GC(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + indexbox = self.get_constant_box(op.args[1]) + if indexbox is not None: + value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + return + value.ensure_nonnull() + ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) + self.emit_operation(op) + + def optimize_ARRAYCOPY(self, op): + source_value = self.getvalue(op.args[2]) + dest_value = self.getvalue(op.args[3]) + source_start_box = self.get_constant_box(op.args[4]) + dest_start_box = self.get_constant_box(op.args[5]) + length = self.get_constant_box(op.args[6]) + if (source_value.is_virtual() and source_start_box and dest_start_box + and length and dest_value.is_virtual()): + # XXX optimize the case where dest value is not virtual, + # but we still can avoid a mess + source_start = source_start_box.getint() + dest_start = dest_start_box.getint() + for index in range(length.getint()): + val = source_value.getitem(index + source_start) + dest_value.setitem(index + dest_start, val) + return + if length and length.getint() == 0: + return # 0-length arraycopy + descr = op.args[0] + assert isinstance(descr, AbstractDescr) + self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + descr)) + + def propagate_forward(self, op): + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.emit_operation(op) + +optimize_ops = _findall(OptVirtualize, 'optimize_') Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py Sat Sep 4 12:16:13 2010 @@ -5,6 +5,7 @@ BaseTest) from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt +import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1 from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt @@ -64,7 +65,7 @@ class cpu(object): pass opt = FakeOptimizer() - virt1 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt1 = virtualize.AbstractVirtualStructValue(opt, None) lst1 = virt1._get_field_descr_list() assert lst1 == [] lst2 = virt1._get_field_descr_list() @@ -75,7 +76,7 @@ lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - virt2 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None)) @@ -88,7 +89,7 @@ self.fieldnums = fieldnums def equals(self, fieldnums): return self.fieldnums == fieldnums - class FakeVirtualValue(optimizeopt.AbstractVirtualValue): + class FakeVirtualValue(virtualize.AbstractVirtualValue): def _make_virtual(self, *args): return FakeVInfo() v1 = FakeVirtualValue(None, None, None) @@ -257,6 +258,7 @@ optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) + print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) def test_simple(self): Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py 
============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py Sat Sep 4 12:16:13 2010 @@ -1,7 +1,7 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi -from pypy.jit.metainterp.optimizeopt.optimizer import VirtualValue, OptValue, VArrayValue -from pypy.jit.metainterp.optimizeopt.optimizer import VStructValue +from pypy.jit.metainterp.optimizeopt.virtualize import VirtualValue, OptValue, VArrayValue +from pypy.jit.metainterp.optimizeopt.virtualize import VStructValue from pypy.jit.metainterp.resume import * from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt from pypy.jit.metainterp.history import ConstPtr, ConstFloat From antocuni at codespeak.net Sat Sep 4 12:48:30 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Sat, 4 Sep 2010 12:48:30 +0200 (CEST) Subject: [pypy-svn] r76865 - in pypy/build/bot2/pypybuildbot: . test Message-ID: <20100904104830.24CF8282BF1@codespeak.net> Author: antocuni Date: Sat Sep 4 12:48:29 2010 New Revision: 76865 Modified: pypy/build/bot2/pypybuildbot/builds.py pypy/build/bot2/pypybuildbot/summary.py pypy/build/bot2/pypybuildbot/test/test_summary.py Log: add two properties to the build status of own tests and app-level tests: - a summary of test result (i.e., the number of passed/failed/etc tests) - the description of the test (to distinguish e.g. own and applevel tests) The idea is to use these infos to build an index revision->test status, and show it on the nightly build page Modified: pypy/build/bot2/pypybuildbot/builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/builds.py (original) +++ pypy/build/bot2/pypybuildbot/builds.py Sat Sep 4 12:48:29 2010 @@ -54,6 +54,19 @@ [self.translationTarget] + targetArgs) #self.command = ['cp', '/tmp/pypy-c', '.'] + +class TestRunnerCmd(ShellCmd): + + def commandComplete(self, cmd): + from pypybuildbot.summary import RevisionOutcomeSet + pytestLog = cmd.logs['pytestLog'] + outcome = RevisionOutcomeSet(None) + outcome.populate(pytestLog) + summary = outcome.get_summary() + build_status = self.build.build_status + build_status.setProperty('test_summary', summary, "TestRunnerCmd") + build_status.setProperty('test_description', self.description, "TestRunnerCmd") + # ________________________________________________________________ def setup_steps(platform, factory, workdir=None): @@ -72,6 +85,7 @@ workdir=workdir)) + class Own(factory.BuildFactory): def __init__(self, platform='linux', cherrypick='', extra_cfgs=[]): @@ -79,7 +93,7 @@ setup_steps(platform, self) - self.addStep(ShellCmd( + self.addStep(TestRunnerCmd( description="pytest", command=["python", "testrunner/runner.py", "--logfile=testrun.log", @@ -109,7 +123,7 @@ if app_tests: if app_tests == True: app_tests = [] - self.addStep(ShellCmd( + self.addStep(TestRunnerCmd( description="app-level (-A) test", command=["python", "testrunner/runner.py", "--logfile=pytest-A.log", Modified: pypy/build/bot2/pypybuildbot/summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/summary.py (original) +++ pypy/build/bot2/pypybuildbot/summary.py Sat Sep 4 12:48:29 2010 @@ -23,6 +23,16 @@ return "%dm" % mins return "%dh%d" % (mins/60, mins%60) +class OutcomeSummary(object): + def __init__(self, p, F, s, x): + self.p = p # passed + self.F = 
F # failed + self.s = s # skipped + self.x = x # xfailed + + def __str__(self): + return '%d, %d F, %d s, %d x' % (self.p, self.F, self.s, self.x) + class RevisionOutcomeSet(object): def __init__(self, rev, key=None, run_info=None): @@ -35,6 +45,12 @@ self.longreprs = {} self._run_info = run_info + def get_summary(self): + return OutcomeSummary(self.numpassed, + len(self.failed), + len(self.skipped), + self.numxfailed) + def populate_one(self, name, shortrepr, longrepr=None): if shortrepr == '!': namekey = [name, ''] Modified: pypy/build/bot2/pypybuildbot/test/test_summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_summary.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_summary.py Sat Sep 4 12:48:29 2010 @@ -46,6 +46,29 @@ key_namekey = rev_outcome_set.get_key_namekey(("a.c", "test_four")) assert key_namekey == (('foo', 40), ("a.c", "test_four")) + def test_get_summary(self): + rev_outcome_set = summary.RevisionOutcomeSet(None) + + log = StringIO(""". a/b.py:test_one +F a/b.py:test_two +F a/b.py:test_three +s a/c.py:test_four +s a/c.py:test_five +s a/c.py:test_six +x a/c.py:test_seven +x a/c.py:test_eight +x a/c.py:test_nine +x a/c.py:test_ten +""") + + rev_outcome_set.populate(log) + sum = rev_outcome_set.get_summary() + assert sum.p == 1 + assert sum.F == 2 + assert sum.s == 3 + assert sum.x == 4 + assert str(sum) == '1, 2 F, 3 s, 4 x' + def test_populate_from_empty(self): rev_outcome_set = summary.RevisionOutcomeSet(0) log = StringIO("") From arigo at codespeak.net Sat Sep 4 13:07:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 4 Sep 2010 13:07:52 +0200 (CEST) Subject: [pypy-svn] r76866 - pypy/build/bot2/pypybuildbot Message-ID: <20100904110752.9EE3E282BF1@codespeak.net> Author: arigo Date: Sat Sep 4 13:07:51 2010 New Revision: 76866 Modified: pypy/build/bot2/pypybuildbot/master.py Log: Updates. Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Sat Sep 4 13:07:51 2010 @@ -20,14 +20,16 @@ StatusResourceBuild_init = StatusResourceBuild.__init__ def my_init(self, build_status, build_control, builder_control): StatusResourceBuild_init(self, build_status, build_control, None) -StatusResourceBuild.__init__ = my_init +if StatusResourceBuild.__init__.__name__ == '__init__': + StatusResourceBuild.__init__ = my_init # Disabled. # Disable pinging, as it seems to deadlock the client from buildbot.status.web.builder import StatusResourceBuilder def my_ping(self, req): raise Exception("pinging is disabled, as it seems to deadlock clients") -StatusResourceBuilder.ping = my_ping +if StatusResourceBuilder.ping.__name__ == 'ping': + StatusResourceBuilder.ping = my_ping # Disabled. # Add a link from the builder page to the summary page @@ -46,15 +48,18 @@ data[i:]) return data _previous_body = StatusResourceBuilder.body -StatusResourceBuilder.body = my_body +if _previous_body.__name__ == 'body': + StatusResourceBuilder.body = my_body # Done # Add a similar link from the build page to the summary page def my_body_2(self, req): data = _previous_body_2(self, req) - MARKER = '
SourceStamp' - i = data.find(MARKER) - if i >= 0: + MARKER1 = 'Results' + MARKER2 = '
SourceStamp' + i1 = data.find(MARKER1) + i2 = data.find(MARKER2) + if i1 >= 0 and i2 >= 0: from twisted.web import html b = self.build_status ss = b.getSourceStamp() @@ -64,12 +69,13 @@ "summary?builder=" + html.escape(builder_name) + "&branch=" + html.escape(branch)) data = '%s   (view in summary)\n\n%s'% ( - data[:i], + data[:i2], url, - data[i:]) + data[i2:]) return data _previous_body_2 = StatusResourceBuild.body -StatusResourceBuild.body = my_body_2 +if _previous_body_2.__name__ == 'body': + StatusResourceBuild.body = my_body_2 # Picking a random slave is not really what we want; # let's pick the first available one instead. From arigo at codespeak.net Sat Sep 4 13:50:54 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 4 Sep 2010 13:50:54 +0200 (CEST) Subject: [pypy-svn] r76867 - in pypy/trunk/pypy: annotation rpython Message-ID: <20100904115054.72183282BF1@codespeak.net> Author: arigo Date: Sat Sep 4 13:50:52 2010 New Revision: 76867 Modified: pypy/trunk/pypy/annotation/binaryop.py pypy/trunk/pypy/annotation/bookkeeper.py pypy/trunk/pypy/annotation/builtin.py pypy/trunk/pypy/annotation/model.py pypy/trunk/pypy/rpython/rbuiltin.py Log: It's wrong to check during the annotation of raw_memclear() that we are not given the constant NULL; it introduces an order dependency (maybe we are given the constant NULL so far, but that's because we did not yet analyze the rest of the code that can pass a non-NULL value). Modified: pypy/trunk/pypy/annotation/binaryop.py ============================================================================== --- pypy/trunk/pypy/annotation/binaryop.py (original) +++ pypy/trunk/pypy/annotation/binaryop.py Sat Sep 4 13:50:52 2010 @@ -924,10 +924,10 @@ class __extend__(pairtype(SomeAddress, SomeAddress)): def union((s_addr1, s_addr2)): - return SomeAddress(is_null=s_addr1.is_null and s_addr2.is_null) + return SomeAddress() def sub((s_addr1, s_addr2)): - if s_addr1.is_null and s_addr2.is_null: + if s_addr1.is_null_address() and s_addr2.is_null_address(): return getbookkeeper().immutablevalue(0) return SomeInteger() @@ -953,10 +953,10 @@ class __extend__(pairtype(SomeAddress, SomeInteger)): def add((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() def sub((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): # need to override this specifically to hide the 'raise UnionError' Modified: pypy/trunk/pypy/annotation/bookkeeper.py ============================================================================== --- pypy/trunk/pypy/annotation/bookkeeper.py (original) +++ pypy/trunk/pypy/annotation/bookkeeper.py Sat Sep 4 13:50:52 2010 @@ -431,7 +431,7 @@ elif isinstance(x, lltype._ptr): result = SomePtr(lltype.typeOf(x)) elif isinstance(x, llmemory.fakeaddress): - result = SomeAddress(is_null=not x) + result = SomeAddress() elif isinstance(x, ootype._static_meth): result = SomeOOStaticMeth(ootype.typeOf(x)) elif isinstance(x, ootype._class): Modified: pypy/trunk/pypy/annotation/builtin.py ============================================================================== --- pypy/trunk/pypy/annotation/builtin.py (original) +++ pypy/trunk/pypy/annotation/builtin.py Sat Sep 4 13:50:52 2010 @@ -694,18 +694,14 @@ def raw_free(s_addr): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null def raw_memclear(s_addr, s_int): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null assert isinstance(s_int, SomeInteger) def 
raw_memcopy(s_addr1, s_addr2, s_int): assert isinstance(s_addr1, SomeAddress) - assert not s_addr1.is_null assert isinstance(s_addr2, SomeAddress) - assert not s_addr2.is_null assert isinstance(s_int, SomeInteger) #XXX add noneg...? BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc Modified: pypy/trunk/pypy/annotation/model.py ============================================================================== --- pypy/trunk/pypy/annotation/model.py (original) +++ pypy/trunk/pypy/annotation/model.py Sat Sep 4 13:50:52 2010 @@ -500,12 +500,13 @@ class SomeAddress(SomeObject): immutable = True - def __init__(self, is_null=False): - self.is_null = is_null def can_be_none(self): return False + def is_null_address(self): + return self.is_immutable_constant() and not self.const + # The following class is used to annotate the intermediate value that # appears in expressions of the form: # addr.signed[offset] and addr.signed[offset] = value Modified: pypy/trunk/pypy/rpython/rbuiltin.py ============================================================================== --- pypy/trunk/pypy/rpython/rbuiltin.py (original) +++ pypy/trunk/pypy/rpython/rbuiltin.py Sat Sep 4 13:50:52 2010 @@ -542,16 +542,25 @@ return hop.genop('raw_malloc_usage', [v_size], resulttype=lltype.Signed) def rtype_raw_free(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_free(x) where x is the constant NULL") v_addr, = hop.inputargs(llmemory.Address) hop.exception_cannot_occur() return hop.genop('raw_free', [v_addr]) def rtype_raw_memcopy(hop): + for s_addr in hop.args_s[:2]: + if s_addr.is_null_address(): + raise TyperError("raw_memcopy() with a constant NULL") v_list = hop.inputargs(llmemory.Address, llmemory.Address, lltype.Signed) hop.exception_cannot_occur() return hop.genop('raw_memcopy', v_list) def rtype_raw_memclear(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_memclear(x, n) where x is the constant NULL") v_list = hop.inputargs(llmemory.Address, lltype.Signed) return hop.genop('raw_memclear', v_list) From arigo at codespeak.net Sun Sep 5 10:33:36 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 10:33:36 +0200 (CEST) Subject: [pypy-svn] r76868 - pypy/trunk/pypy/interpreter Message-ID: <20100905083336.95C20282B90@codespeak.net> Author: arigo Date: Sun Sep 5 10:33:33 2010 New Revision: 76868 Modified: pypy/trunk/pypy/interpreter/pycode.py Log: This error message makes no sense when running on top of pypy. Modified: pypy/trunk/pypy/interpreter/pycode.py ============================================================================== --- pypy/trunk/pypy/interpreter/pycode.py (original) +++ pypy/trunk/pypy/interpreter/pycode.py Sun Sep 5 10:33:33 2010 @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. 
""" -import dis, imp, struct, types, new +import dis, imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.argument import Signature @@ -118,7 +118,8 @@ self._compute_flatcall() def _freeze_(self): - if self.magic == cpython_magic: + if (self.magic == cpython_magic and + '__pypy__' not in sys.builtin_module_names): raise Exception("CPython host codes should not be rendered") return False From arigo at codespeak.net Sun Sep 5 10:53:17 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 10:53:17 +0200 (CEST) Subject: [pypy-svn] r76869 - pypy/branch/interplevel-array Message-ID: <20100905085317.8767D282B90@codespeak.net> Author: arigo Date: Sun Sep 5 10:53:15 2010 New Revision: 76869 Removed: pypy/branch/interplevel-array/ Log: Remove this branch. From arigo at codespeak.net Sun Sep 5 13:04:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 13:04:18 +0200 (CEST) Subject: [pypy-svn] r76870 - pypy/branch/better-map-instances/pypy/module/cpyext Message-ID: <20100905110418.2A92A282B90@codespeak.net> Author: arigo Date: Sun Sep 5 13:04:16 2010 New Revision: 76870 Modified: pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py Log: Translation fix. Modified: pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py (original) +++ pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py Sun Sep 5 13:04:16 2010 @@ -15,7 +15,7 @@ class is the class of new object. The dict parameter will be used as the object's __dict__; if NULL, a new dictionary will be created for the instance.""" - if not PyClass_Check(space, w_class): + if not isinstance(w_class, W_ClassObject): return PyErr_BadInternalCall(space) w_result = w_class.instantiate(space) if w_dict is not None: From arigo at codespeak.net Sun Sep 5 13:08:31 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 13:08:31 +0200 (CEST) Subject: [pypy-svn] r76871 - in pypy/branch/better-map-instances/pypy: jit/codewriter objspace/std rpython rpython/lltypesystem rpython/lltypesystem/test rpython/memory/test rpython/ootypesystem rpython/test translator/backendopt/test Message-ID: <20100905110831.E4C72282B90@codespeak.net> Author: arigo Date: Sun Sep 5 13:08:29 2010 New Revision: 76871 Modified: pypy/branch/better-map-instances/pypy/jit/codewriter/jtransform.py pypy/branch/better-map-instances/pypy/objspace/std/tupleobject.py pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lloperation.py pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lltype.py pypy/branch/better-map-instances/pypy/rpython/lltypesystem/opimpl.py pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lloperation.py pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lltype.py pypy/branch/better-map-instances/pypy/rpython/memory/test/test_gctypelayout.py pypy/branch/better-map-instances/pypy/rpython/ootypesystem/ootype.py pypy/branch/better-map-instances/pypy/rpython/ootypesystem/rclass.py pypy/branch/better-map-instances/pypy/rpython/rclass.py pypy/branch/better-map-instances/pypy/rpython/test/test_rclass.py pypy/branch/better-map-instances/pypy/translator/backendopt/test/test_constfold.py Log: Merge branch/no-_immutable_ in this branch too. 
Modified: pypy/branch/better-map-instances/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/better-map-instances/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/better-map-instances/pypy/jit/codewriter/jtransform.py Sun Sep 5 13:08:29 2010 @@ -511,14 +511,11 @@ arraydescr) return [] # check for deepfrozen structures that force constant-folding - hints = v_inst.concretetype.TO._hints - accessor = hints.get("immutable_fields") - if accessor and c_fieldname.value in accessor.fields: + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: pure = '_pure' - if accessor.fields[c_fieldname.value] == "[*]": + if immut == "[*]": self.immutable_arrays[op.result] = True - elif hints.get('immutable'): - pure = '_pure' else: pure = '' argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') Modified: pypy/branch/better-map-instances/pypy/objspace/std/tupleobject.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/tupleobject.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/tupleobject.py Sun Sep 5 13:08:29 2010 @@ -10,7 +10,7 @@ class W_TupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef - _immutable_ = True + _immutable_fields_ = ['wrappeditems[*]'] def __init__(w_self, wrappeditems): make_sure_not_resized(wrappeditems) Modified: pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lloperation.py Sun Sep 5 13:08:29 2010 @@ -85,16 +85,20 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return (self.canfold or # canfold => pure operation - self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable - (self in (llop.getfield, llop.getarrayitem) and - args_v[0].concretetype.TO._hints.get('immutable')) or - (self is llop.getfield and # reading from immutable_field - 'immutable_fields' in args_v[0].concretetype.TO._hints and - args_v[1].value in args_v[0].concretetype.TO - ._hints['immutable_fields'].fields)) - # XXX: what about ootype immutable arrays? + if self.canfold: # canfold => pure operation + return True + if self is llop.debug_assert: # debug_assert is pure enough + return True + # reading from immutable (lltype) + if self is llop.getfield or self is llop.getarrayitem: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype.TO._immutable_field(field) + # reading from immutable (ootype) (xxx what about arrays?) 
+ if self is llop.oogetfield: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype._immutable_field(field) + # default + return False def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/lltypesystem/lltype.py Sun Sep 5 13:08:29 2010 @@ -297,6 +297,15 @@ n = 1 return _struct(self, n, initialization='example') + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) + class RttiStruct(Struct): _runtime_type_info = None @@ -391,6 +400,9 @@ def _container_example(self): return _array(self, 1, initialization='example') + def _immutable_field(self, index=None): + return self._hints.get('immutable', False) + class GcArray(Array): _gckind = 'gc' def _inline_is_varsize(self, last): Modified: pypy/branch/better-map-instances/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/lltypesystem/opimpl.py Sun Sep 5 13:08:29 2010 @@ -150,12 +150,7 @@ # we can constant-fold this if the innermost structure from which we # read the final field is immutable. T = lltype.typeOf(innermostcontainer).TO - if T._hints.get('immutable'): - pass - elif ('immutable_fields' in T._hints and - offsets[-1] in T._hints['immutable_fields'].fields): - pass - else: + if not T._immutable_field(offsets[-1]): raise TypeError("cannot fold getinteriorfield on mutable struct") assert not isinstance(ob, lltype._interior_ptr) return ob @@ -437,19 +432,15 @@ def op_getfield(p, name): checkptr(p) TYPE = lltype.typeOf(p).TO - if TYPE._hints.get('immutable'): - pass - elif ('immutable_fields' in TYPE._hints and - name in TYPE._hints['immutable_fields'].fields): - pass - else: + if not TYPE._immutable_field(name): raise TypeError("cannot fold getfield on mutable struct") return getattr(p, name) def op_getarrayitem(p, index): checkptr(p) - if not lltype.typeOf(p).TO._hints.get('immutable'): - raise TypeError("cannot fold getfield on mutable array") + ARRAY = lltype.typeOf(p).TO + if not ARRAY._immutable_field(index): + raise TypeError("cannot fold getarrayitem on mutable array") return p[index] def _normalize(x): Modified: pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lloperation.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lloperation.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lloperation.py Sun Sep 5 13:08:29 2010 @@ -88,7 +88,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) v_s3 = Variable() v_s3.concretetype = lltype.Ptr(S3) assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) @@ -103,7 +103,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), 
hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') Modified: pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lltype.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lltype.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/lltypesystem/test/test_lltype.py Sun Sep 5 13:08:29 2010 @@ -781,6 +781,28 @@ p = cast_opaque_ptr(llmemory.GCREF, a) assert hash1 == identityhash(p) +def test_immutable_hint(): + S = GcStruct('S', ('x', lltype.Signed)) + assert S._immutable_field('x') == False + # + S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' + + class TestTrackAllocation: def setup_method(self, func): start_tracking_allocations() Modified: pypy/branch/better-map-instances/pypy/rpython/memory/test/test_gctypelayout.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/memory/test/test_gctypelayout.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/memory/test/test_gctypelayout.py Sun Sep 5 13:08:29 2010 @@ -101,7 +101,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) Modified: pypy/branch/better-map-instances/pypy/rpython/ootypesystem/ootype.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/ootypesystem/ootype.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/ootypesystem/ootype.py Sun Sep 5 13:08:29 2010 @@ -267,6 +267,14 @@ return self._fields_with_default[:] return self._superclass._get_fields_with_default() + self._fields_with_default + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) class SpecializableType(OOType): Modified: pypy/branch/better-map-instances/pypy/rpython/ootypesystem/rclass.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/ootypesystem/rclass.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/ootypesystem/rclass.py Sun Sep 5 13:08:29 2010 @@ -194,6 +194,7 @@ self.lowleveltype._hints.update(hints) if self.classdef is None: + self.fields = {} self.allfields = {} self.allmethods = {} self.allclassattributes = {} @@ -210,6 +211,7 @@ allclassattributes = {} fields = {} + nonmangledfields = [] fielddefaults = {} if llfields: @@ -224,6 +226,7 @@ allfields[mangled] = repr oot = repr.lowleveltype fields[mangled] = oot + 
nonmangledfields.append(name) try: value = self.classdef.classdesc.read_attribute(name) fielddefaults[mangled] = repr.convert_desc_or_const(value) @@ -294,6 +297,7 @@ if not attrdef.s_value.is_constant(): classattributes[mangled] = attrdef.s_value, value + self.fields = nonmangledfields self.allfields = allfields self.allmethods = allmethods self.allclassattributes = allclassattributes Modified: pypy/branch/better-map-instances/pypy/rpython/rclass.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/rclass.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/rclass.py Sun Sep 5 13:08:29 2010 @@ -9,6 +9,7 @@ class FieldListAccessor(object): def initialize(self, TYPE, fields): + assert type(fields) is dict self.TYPE = TYPE self.fields = fields @@ -18,6 +19,10 @@ def _freeze_(self): return True +class ImmutableConflictError(Exception): + """Raised when the _immutable_ or _immutable_fields_ hints are + not consistent across a class hierarchy.""" + def getclassrepr(rtyper, classdef): try: @@ -153,12 +158,16 @@ pass def _check_for_immutable_hints(self, hints): - if '_immutable_' in self.classdef.classdesc.classdict: + if self.classdef.classdesc.lookup('_immutable_') is not None: hints = hints.copy() hints['immutable'] = True - if '_immutable_fields_' in self.classdef.classdesc.classdict: + self.immutable_field_list = [] # unless overwritten below + if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() - self.immutable_field_list = self.classdef.classdesc.classdict['_immutable_fields_'].value + immutable_fields = self.classdef.classdesc.classdict.get( + '_immutable_fields_') + if immutable_fields is not None: + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -178,10 +187,20 @@ return 'InstanceR %s' % (clsname,) def _setup_repr_final(self): + self._setup_immutable_field_list() + self._check_for_immutable_conflicts() + + def _setup_immutable_field_list(self): hints = self.object_type._hints if "immutable_fields" in hints: accessor = hints["immutable_fields"] - self._parse_field_list(self.immutable_field_list, accessor) + if not hasattr(accessor, 'fields'): + immutable_fields = [] + rbase = self + while rbase.classdef is not None: + immutable_fields += rbase.immutable_field_list + rbase = rbase.rbase + self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} @@ -191,11 +210,44 @@ suffix = '[*]' else: suffix = '' - mangled_name, r = self._get_field(name) + try: + mangled_name, r = self._get_field(name) + except KeyError: + continue with_suffix[mangled_name] = suffix accessor.initialize(self.object_type, with_suffix) return with_suffix + def _check_for_immutable_conflicts(self): + # check for conflicts, i.e. 
a field that is defined normally as + # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void + is_self_immutable = "immutable" in self.object_type._hints + base = self + while base.classdef is not None: + base = base.rbase + for fieldname in base.fields: + try: + mangled, r = base._get_field(fieldname) + except KeyError: + continue + if r.lowleveltype == Void: + continue + base._setup_immutable_field_list() + if base.object_type._immutable_field(mangled): + continue + # 'fieldname' is a mutable, non-Void field in the parent + if is_self_immutable: + raise ImmutableConflictError( + "class %r has _immutable_=True, but parent class %r " + "defines (at least) the mutable field %r" % ( + self, base, fieldname)) + if fieldname in self.immutable_field_list: + raise ImmutableConflictError( + "field %r is defined mutable in class %r, but " + "listed in _immutable_fields_ in subclass %r" % ( + fieldname, base, self)) + def new_instance(self, llops, classcallhop=None): raise NotImplementedError Modified: pypy/branch/better-map-instances/pypy/rpython/test/test_rclass.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rpython/test/test_rclass.py (original) +++ pypy/branch/better-map-instances/pypy/rpython/test/test_rclass.py Sun Sep 5 13:08:29 2010 @@ -738,27 +738,92 @@ assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype - def test_immutable_inheritance(self): - class I(object): - def __init__(self, v): - self.v = v - - class J(I): + def test_immutable_forbidden_inheritance_1(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_fields_ = ['v'] + def f(): + A().v = 123 + B() # crash: class B says 'v' is immutable, + # but it is defined on parent class A + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_forbidden_inheritance_2(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B() # crash: class B has _immutable_ = True + # but class A defines 'v' to be mutable + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_ok_inheritance_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['v'] + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B().w = 456 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + try: + A_TYPE = B_TYPE.super + except AttributeError: + A_TYPE = B_TYPE._superclass # for ootype + accessor = A_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype + + def test_immutable_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_ = True + class B(A): + pass + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] # inherited from A + + def test_immutable_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): _immutable_ = True - def __init__(self, v, w): - self.w = w - I.__init__(self, v) - - j = J(3, 4) - def f(): - j.v = j.v * 1 # 
make the annotator think it is mutated - j.w = j.w * 1 # make the annotator think it is mutated - return j.v + j.w - - t, typer, graph = self.gengraph(f, [], backendopt=True) - f_summary = summary(graph) - assert f_summary == {"setfield": 2} or \ - f_summary == {"oosetfield": 2} # for ootype + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + + def test_immutable_subclass_void(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def myfunc(): + pass + def f(): + A().f = myfunc # it's ok to add Void attributes to A + B().v = 123 # even though only B is declared _immutable_ + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] class TestLLtype(BaseTestRclass, LLRtypeMixin): Modified: pypy/branch/better-map-instances/pypy/translator/backendopt/test/test_constfold.py ============================================================================== --- pypy/branch/better-map-instances/pypy/translator/backendopt/test/test_constfold.py (original) +++ pypy/branch/better-map-instances/pypy/translator/backendopt/test/test_constfold.py Sun Sep 5 13:08:29 2010 @@ -49,7 +49,7 @@ accessor = rclass.FieldListAccessor() S2 = lltype.GcStruct('S2', ('x', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S2, ['x']) + accessor.initialize(S2, {'x': ''}) test_simple(S2) From antocuni at codespeak.net Sun Sep 5 15:31:25 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Sun, 5 Sep 2010 15:31:25 +0200 (CEST) Subject: [pypy-svn] r76872 - pypy/build/bot2/pypybuildbot Message-ID: <20100905133125.BA02A282B90@codespeak.net> Author: antocuni Date: Sun Sep 5 15:31:24 2010 New Revision: 76872 Modified: pypy/build/bot2/pypybuildbot/builds.py Log: try another approach: attaching the test summary to the build is not a good idea, because then getting the summary given the revision is expensive. Instead, we build a mapping revision->summary as we go: since the mapping is attached to the builder_status, it is also serialized and saved on disk, so it persists across restarts. 
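A rough sketch of the reading side that this log message implies (not code from the commit; the helper name is invented, while 'summary_by_revision' is the attribute added in the diff below): once the mapping is attached to the builder status, looking up the outcome for a revision is a plain dictionary access.

    def get_test_summary(builder_status, rev):
        # the attribute only exists on builders that have already run a
        # TestRunnerCmd step at least once, hence the getattr() default
        mapping = getattr(builder_status, 'summary_by_revision', {})
        return mapping.get(rev)   # an OutcomeSummary instance, or None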
Modified: pypy/build/bot2/pypybuildbot/builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/builds.py (original) +++ pypy/build/bot2/pypybuildbot/builds.py Sun Sep 5 15:31:24 2010 @@ -62,10 +62,17 @@ pytestLog = cmd.logs['pytestLog'] outcome = RevisionOutcomeSet(None) outcome.populate(pytestLog) - summary = outcome.get_summary() + summary = outcome.get_summary() build_status = self.build.build_status - build_status.setProperty('test_summary', summary, "TestRunnerCmd") - build_status.setProperty('test_description', self.description, "TestRunnerCmd") + builder = build_status.builder + if not hasattr(builder, 'summary_by_revision'): + builder.summary_by_revision = {} + try: + rev = build_status.getProperty('got_revision') + except KeyError: + return + else: + builder.summary_by_revision[rev] = summary # ________________________________________________________________ From arigo at codespeak.net Sun Sep 5 15:58:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 15:58:04 +0200 (CEST) Subject: [pypy-svn] r76873 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100905135804.15FD4282B90@codespeak.net> Author: arigo Date: Sun Sep 5 15:58:01 2010 New Revision: 76873 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: Fix RPythonicity. Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Sun Sep 5 15:58:01 2010 @@ -11,6 +11,7 @@ NUM_DIGITS = 4 class AbstractAttribute(object): + _immutable_fields_ = ['w_cls'] cache_attrs = None _size_estimate = 0 @@ -86,7 +87,6 @@ class Terminator(AbstractAttribute): - _immutable_fields_ = ['w_cls'] def read(self, obj, selector): return None From arigo at codespeak.net Sun Sep 5 15:58:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 15:58:53 +0200 (CEST) Subject: [pypy-svn] r76874 - pypy/trunk/pypy/rlib Message-ID: <20100905135853.7F1CD282B90@codespeak.net> Author: arigo Date: Sun Sep 5 15:58:51 2010 New Revision: 76874 Modified: pypy/trunk/pypy/rlib/rweakref.py Log: Fix for an order dependency. Modified: pypy/trunk/pypy/rlib/rweakref.py ============================================================================== --- pypy/trunk/pypy/rlib/rweakref.py (original) +++ pypy/trunk/pypy/rlib/rweakref.py Sun Sep 5 15:58:51 2010 @@ -78,7 +78,7 @@ return self.__class__, def method_get(self, s_key): - assert isinstance(s_key, annmodel.SomeString) + assert annmodel.SomeString(can_be_None=True).contains(s_key) return annmodel.SomeInstance(self.valueclassdef, can_be_None=True) def method_set(self, s_key, s_value): From arigo at codespeak.net Sun Sep 5 16:00:10 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 5 Sep 2010 16:00:10 +0200 (CEST) Subject: [pypy-svn] r76875 - pypy/trunk/pypy/module/posix/test Message-ID: <20100905140010.9A7DA282B90@codespeak.net> Author: arigo Date: Sun Sep 5 16:00:09 2010 New Revision: 76875 Modified: pypy/trunk/pypy/module/posix/test/test_posix2.py Log: Revert r76863. The problem needed a more general workaround: I have put a sitecustomize.py that deletes os.openpty and posix.openpty. 
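The sitecustomize.py mentioned above is not included in this commit; a minimal sketch of such a workaround, assuming it only has to hide openpty on the affected build machine, could look like:

    # hypothetical sitecustomize.py: hide openpty so that the pty tests
    # are skipped instead of deadlocking on this machine
    import os
    try:
        import posix
    except ImportError:
        posix = None
    for _mod in (os, posix):
        if _mod is not None and hasattr(_mod, 'openpty'):
            delattr(_mod, 'openpty')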
Modified: pypy/trunk/pypy/module/posix/test/test_posix2.py ============================================================================== --- pypy/trunk/pypy/module/posix/test/test_posix2.py (original) +++ pypy/trunk/pypy/module/posix/test/test_posix2.py Sun Sep 5 16:00:09 2010 @@ -324,21 +324,12 @@ if hasattr(__import__(os.name), "openpty"): def test_openpty(self): os = self.posix - g = os.popen("uname -r", "r") - version = g.read() - g.close() - if version.startswith('2.6.31'): - skip("openpty() deadlocks completely on " - "at least some Linux 2.6.31") - master_fd, slave_fd = self.posix.openpty() - try: - assert isinstance(master_fd, int) - assert isinstance(slave_fd, int) - os.write(slave_fd, 'x') - assert os.read(master_fd, 1) == 'x' - finally: - os.close(master_fd) - os.close(slave_fd) + master_fd, slave_fd = os.openpty() + assert isinstance(master_fd, int) + assert isinstance(slave_fd, int) + os.write(slave_fd, 'x\n') + data = os.read(master_fd, 100) + assert data.startswith('x') if hasattr(__import__(os.name), "execv"): From antocuni at codespeak.net Sun Sep 5 23:24:47 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Sun, 5 Sep 2010 23:24:47 +0200 (CEST) Subject: [pypy-svn] r76877 - pypy/build/bot2/pypybuildbot Message-ID: <20100905212447.6D043282B90@codespeak.net> Author: antocuni Date: Sun Sep 5 23:24:45 2010 New Revision: 76877 Modified: pypy/build/bot2/pypybuildbot/builds.py Log: it seems that we need to explicitly save the builder in order to persist summary_by_revision Modified: pypy/build/bot2/pypybuildbot/builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/builds.py (original) +++ pypy/build/bot2/pypybuildbot/builds.py Sun Sep 5 23:24:45 2010 @@ -73,6 +73,7 @@ return else: builder.summary_by_revision[rev] = summary + builder.saveYourself() # ________________________________________________________________ From cfbolz at codespeak.net Mon Sep 6 10:01:11 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Mon, 6 Sep 2010 10:01:11 +0200 (CEST) Subject: [pypy-svn] r76881 - in pypy/branch/better-map-instances/pypy: interpreter module/__builtin__ Message-ID: <20100906080111.F197E282B90@codespeak.net> Author: cfbolz Date: Mon Sep 6 10:01:10 2010 New Revision: 76881 Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Log: make old-style classes use mapdict Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Mon Sep 6 10:01:10 2010 @@ -198,8 +198,7 @@ value = func_with_new_name(value, value.func_name) body[key] = value - if (features == ("user", "dict", "weakref", "slots") and - config.objspace.std.withmapdict): + if (config.objspace.std.withmapdict and "dict" in features): from pypy.objspace.std.mapdict import Object add(Object) features = () Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Mon Sep 6 10:01:10 2010 @@ -397,7 +397,8 @@ if w_meth is not None: 
space.call_function(w_meth, w_name, w_value) else: - self.setdictvalue(space, name, w_value) + # bit obscure: appease normalization + self.setdictvalue(space, name, w_value, True) def descr_delattr(self, space, w_name): name = unwrap_attr(space, w_name) From arigo at codespeak.net Mon Sep 6 10:55:32 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 10:55:32 +0200 (CEST) Subject: [pypy-svn] r76888 - in pypy: branch/gc-module gc-module Message-ID: <20100906085532.B7B02282B90@codespeak.net> Author: arigo Date: Mon Sep 6 10:55:31 2010 New Revision: 76888 Added: pypy/branch/gc-module/ - copied from r76887, pypy/gc-module/ Removed: pypy/gc-module/ Log: Oups, move it to "branch". From arigo at codespeak.net Mon Sep 6 11:01:29 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 11:01:29 +0200 (CEST) Subject: [pypy-svn] r76889 - pypy/branch/gc-module/pypy/module/gc Message-ID: <20100906090129.0B409282B90@codespeak.net> Author: arigo Date: Mon Sep 6 11:01:27 2010 New Revision: 76889 Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/interp_gc.py Log: Kill gc.estimate_heap_size(). It's not too useful in that form. Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Mon Sep 6 11:01:27 2010 @@ -10,7 +10,6 @@ 'collect': 'interp_gc.collect', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'estimate_heap_size': 'interp_gc.estimate_heap_size', 'garbage' : 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } Modified: pypy/branch/gc-module/pypy/module/gc/interp_gc.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/interp_gc.py (original) +++ pypy/branch/gc-module/pypy/module/gc/interp_gc.py Mon Sep 6 11:01:27 2010 @@ -24,36 +24,6 @@ # ____________________________________________________________ -import sys -platform = sys.platform - -def estimate_heap_size(space): - # XXX should be done with the help of the GCs - if platform == "linux2": - import os - pid = os.getpid() - try: - fd = os.open("/proc/" + str(pid) + "/status", os.O_RDONLY, 0777) - except OSError: - pass - else: - try: - content = os.read(fd, 1000000) - finally: - os.close(fd) - lines = content.split("\n") - for line in lines: - if line.startswith("VmSize:"): - start = line.find(" ") # try to ignore tabs - assert start > 0 - stop = len(line) - 3 - assert stop > 0 - result = int(line[start:stop].strip(" ")) * 1024 - return space.wrap(result) - raise OperationError(space.w_RuntimeError, - space.wrap("can't estimate the heap size")) -estimate_heap_size.unwrap_spec = [ObjSpace] - def dump_heap_stats(space, filename): tb = rgc._heap_stats() if not tb: From antocuni at codespeak.net Mon Sep 6 12:15:29 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 6 Sep 2010 12:15:29 +0200 (CEST) Subject: [pypy-svn] r76891 - in pypy/build/bot2/pypybuildbot: . 
test Message-ID: <20100906101529.EAE265080B@codespeak.net> Author: antocuni Date: Mon Sep 6 12:15:27 2010 New Revision: 76891 Added: pypy/build/bot2/pypybuildbot/test/test_pypylist.py (contents, props changed) Modified: pypy/build/bot2/pypybuildbot/builds.py pypy/build/bot2/pypybuildbot/pypylist.py pypy/build/bot2/pypybuildbot/summary.py Log: - use both the branch and the revision to index the test summary - display the test results in the nightly build page Modified: pypy/build/bot2/pypybuildbot/builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/builds.py (original) +++ pypy/build/bot2/pypybuildbot/builds.py Mon Sep 6 12:15:27 2010 @@ -65,14 +65,20 @@ summary = outcome.get_summary() build_status = self.build.build_status builder = build_status.builder - if not hasattr(builder, 'summary_by_revision'): - builder.summary_by_revision = {} + properties = build_status.getProperties() + if not hasattr(builder, 'summary_by_branch_and_revision'): + builder.summary_by_branch_and_revision = {} try: - rev = build_status.getProperty('got_revision') + rev = properties['got_revision'] + branch = properties['branch'] + if branch is None: + branch = 'trunk' + if branch.endswith('/'): + branch = branch[:-1] except KeyError: return else: - builder.summary_by_revision[rev] = summary + builder.summary_by_branch_and_revision[(branch, rev)] = summary builder.saveYourself() # ________________________________________________________________ Modified: pypy/build/bot2/pypybuildbot/pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/pypylist.py Mon Sep 6 12:15:27 2010 @@ -1,35 +1,208 @@ -from twisted.web.static import File +import os.path +import datetime +import itertools import re +import py +from twisted.web import resource +from twisted.web.static import File, DirectoryLister -# to get the desired order keep in mind that they are reversed at the end, so -# the highest the value, the bigger the priority -FEATURES = { - 'jit': 100, - 'nojit': 50, - 'stackless': 10 - } +class PyPyTarball(object): -PLATFORMS = { - 'linux': 100, - 'linux64': 50, - 'win32': 10, - } + # to get the desired order keep in mind that they are reversed at the end, + # so the highest the value, the bigger the priority + FEATURES_PRIORITY = { + 'jit': 100, + 'nojit': 50, + 'stackless': 10 + } + + PLATFORM_PRIORITY = { + 'linux': 100, + 'linux64': 50, + 'osx': 30, + 'win32': 10, + } + + PLATFORMS = { + 'linux': 'linux-x86-32', + 'linux64': 'linux-x86-64', + 'osx': 'macosx-x86-32', + 'win32': 'win-x86-32', + } + + DESCRIPTIONS = { + 'nojit': 'app-level', + 'jit': 'jit', + 'stackless': 'stackless-app-level', + } + + def __init__(self, filename): + self.filename = filename + try: + self.parse_filename() + except ValueError: + self.exe = None + self.backend = None + self.features = None + self.rev = -1 + self.platform = None + + def parse_filename(self): + if not self.filename.endswith('.tar.bz2'): + raise ValueError + name = self.filename.replace('.tar.bz2', '') + self.exe, self.backend, self.features, self.rev, self.platform = name.split('-') + + def key(self): + return (self.rev, + self.FEATURES_PRIORITY.get(self.features, -1), + self.PLATFORM_PRIORITY.get(self.platform, -1)) + + def get_builder_names(self): + platform = self.PLATFORMS.get(self.platform, self.platform) + description = self.DESCRIPTIONS.get(self.features, self.features) + own_builder 
= 'own-%s' % platform + app_builder = '%s-%s-%s-%s' % (self.exe, self.backend, description, platform) + return own_builder, app_builder -def parsename(name): - # name is something like pypy-c-jit-75654-linux.tar.bz2 - try: - name2 = name.replace('.tar.bz2', '') - exe, backend, features, rev, platform = name2.split('-') - except ValueError: - return '', name - else: - return rev, FEATURES.get(features, -1), PLATFORMS.get(platform, -1), name class PyPyList(File): def listNames(self): names = File.listNames(self) - items = map(parsename, names) - items.sort() - items.reverse() - return [item[-1] for item in items] + items = map(PyPyTarball, names) + items.sort(key=PyPyTarball.key, reverse=True) + return [item.filename for item in items] + + def directoryListing(self): + def is_pypy_dir(names): + for name in names: + if name.startswith('pypy-c'): + return True + return False + names = self.listNames() + if is_pypy_dir(names): + Listener = PyPyDirectoryLister + else: + Listener = DirectoryLister + return Listener(self.path, + names, + self.contentTypes, + self.contentEncodings, + self.defaultType) + + + +class PyPyDirectoryLister(DirectoryLister): + template = """ + +%(header)s + + + + +

%(header)s

+ + + + + + + + + + + + +%(tableContent)s + +
Filename | Size | Date | own tests | applevel tests
+ + + +""" + + linePattern = """ + %(text)s + %(size)s + %(date)s + %(own_summary)s + %(app_summary)s + +""" + + def render(self, request): + self.status = request.site.buildbot_service.getStatus() + return DirectoryLister.render(self, request) + + def _buildTableContent(self, elements): + tableContent = [] + rowClasses = itertools.cycle(['odd', 'even']) + for element, rowClass in zip(elements, rowClasses): + element["class"] = rowClass + result = self._add_test_results(element, rowClass) + tableContent.append(self.linePattern % element) + return tableContent + + def _add_test_results(self, element, rowClass): + filename = element['href'] + f = py.path.local(self.path).join(filename) + date = datetime.date.fromtimestamp(f.mtime()) + element['date'] = date.isoformat() + t = PyPyTarball(filename) + own_builder, app_builder = t.get_builder_names() + own_summary = self._get_summary(own_builder, t.rev) + app_summary = self._get_summary(app_builder, t.rev) + element['own_summary'] = own_summary + element['app_summary'] = app_summary + element['own_summary_class'] = self._get_summary_class(own_summary, rowClass) + element['app_summary_class'] = self._get_summary_class(app_summary, rowClass) + + def _get_summary_class(self, summary, rowClass): + if summary is None: + return rowClass + elif summary.is_ok(): + return rowClass + '-passed' + else: + rowClass + '-failed' + + def _get_branch(self): + parts = self.path.split(os.path.sep) + i = parts.index('nightly') + branch = os.path.sep.join(parts[i+1:]) + return branch + + def _get_summary(self, builder_name, rev): + try: + branch = self._get_branch() + builder = self.status.getBuilder(builder_name) + return builder.summary_by_branch_and_revision[(branch, rev)] + except (AttributeError, KeyError): + return None + # for testing + ## from pypybuildbot.summary import OutcomeSummary + ## import random + ## if random.choice([True, True, True, False]): + ## return OutcomeSummary(1000, 0, 2, 4) + ## else: + ## return OutcomeSummary(990, 10, 2, 4) Modified: pypy/build/bot2/pypybuildbot/summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/summary.py (original) +++ pypy/build/bot2/pypybuildbot/summary.py Mon Sep 6 12:15:27 2010 @@ -30,6 +30,9 @@ self.s = s # skipped self.x = x # xfailed + def is_ok(self): + return self.F == 0 + def __str__(self): return '%d, %d F, %d s, %d x' % (self.p, self.F, self.s, self.x) Added: pypy/build/bot2/pypybuildbot/test/test_pypylist.py ============================================================================== --- (empty file) +++ pypy/build/bot2/pypybuildbot/test/test_pypylist.py Mon Sep 6 12:15:27 2010 @@ -0,0 +1,67 @@ +from pypybuildbot.pypylist import PyPyTarball + +def test_pypytarball(): + t = PyPyTarball('pypy-c-jit-75654-linux.tar.bz2') + assert t.filename == 'pypy-c-jit-75654-linux.tar.bz2' + assert t.exe == 'pypy' + assert t.backend == 'c' + assert t.features == 'jit' + assert t.rev == '75654' + assert t.platform == 'linux' + +def test_invalid_filename(): + t = PyPyTarball('foo') + assert t.filename == 'foo' + assert t.exe == None + assert t.backend == None + assert t.features == None + assert t.rev == -1 + assert t.platform == None + t2 = PyPyTarball('pypy-c-jit-75654-linux.tar.bz2') + assert t < t2 + +def test_sort(): + files = map(PyPyTarball, [ + 'pypy-c-jit-10000-linux.tar.bz2', + 'pypy-c-jit-20000-linux.tar.bz2', + 'pypy-c-nojit-10000-linux.tar.bz2', + 'pypy-c-jit-10000-linux64.tar.bz2', + 'pypy-c-jit-10000-win32.tar.bz2', + 
'pypy-c-stackless-10000-linux.tar.bz2', + ]) + + files.sort(key=PyPyTarball.key, reverse=True) + files = [f.filename for f in files] + assert files == [ + 'pypy-c-jit-20000-linux.tar.bz2', + 'pypy-c-jit-10000-linux.tar.bz2', + 'pypy-c-jit-10000-linux64.tar.bz2', + 'pypy-c-jit-10000-win32.tar.bz2', + 'pypy-c-nojit-10000-linux.tar.bz2', + 'pypy-c-stackless-10000-linux.tar.bz2', + ] + +def test_builder_names(): + t = PyPyTarball('pypy-c-jit-76867-linux.tar.bz2') + assert t.get_builder_names() == ('own-linux-x86-32', + 'pypy-c-jit-linux-x86-32') + + t = PyPyTarball('pypy-c-nojit-76867-linux.tar.bz2') + assert t.get_builder_names() == ('own-linux-x86-32', + 'pypy-c-app-level-linux-x86-32') + + t = PyPyTarball('pypy-c-stackless-76867-linux.tar.bz2') + assert t.get_builder_names() == ('own-linux-x86-32', + 'pypy-c-stackless-app-level-linux-x86-32') + + t = PyPyTarball('pypy-c-jit-76867-osx.tar.bz2') + assert t.get_builder_names() == ('own-macosx-x86-32', + 'pypy-c-jit-macosx-x86-32') + + t = PyPyTarball('pypy-c-jit-76867-linux64.tar.bz2') + assert t.get_builder_names() == ('own-linux-x86-64', + 'pypy-c-jit-linux-x86-64') + + t = PyPyTarball('pypy-c-jit-76867-win32.tar.bz2') + assert t.get_builder_names() == ('own-win-x86-32', + 'pypy-c-jit-win-x86-32') From arigo at codespeak.net Mon Sep 6 13:42:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 13:42:13 +0200 (CEST) Subject: [pypy-svn] r76892 - in pypy/branch/gc-module/pypy/rlib: . test Message-ID: <20100906114213.75A5F5080B@codespeak.net> Author: arigo Date: Mon Sep 6 13:42:11 2010 New Revision: 76892 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rlib/test/test_rgc.py Log: Interface to be implemented by the gc. Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 13:42:11 2010 @@ -313,3 +313,53 @@ func._dont_inline_ = True func._gc_no_collect_ = True return func + +# ____________________________________________________________ + +def _get_objects(): + lst = gc.get_objects() + return map(_GcRef, lst) + +def _get_referents(gcref): + lst = gc.get_referents(gcref._x) + return map(_GcRef, lst) + +def _get_memory_usage(gcref): + # approximate implementation using CPython's type info + Class = type(gcref._x) + size = Class.__basicsize__ + if Class.__itemsize__ > 0: + size += Class.__itemsize__ * len(gcref._x) + return size + +class _GcRef(object): + # implementation-specific: there should not be any after translation + __slots__ = ['_x'] + def __init__(self, x): + self._x = x + def __hash__(self): + return object.__hash__(self._x) + def __eq__(self, other): + assert isinstance(other, _GcRef) + return self._x is other._x + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): + return "_GcRef(%r)" % (self._x, ) + def _freeze_(self): + raise Exception("instances of rlib.rgc._GcRef cannot be translated") + +def cast_instance_to_gcref(x): + # Before translation, casts an RPython instance into a _GcRef. + # After translation, it is a variant of cast_object_to_ptr(GCREF). + return _GcRef(x) + +def try_cast_gcref_to_instance(Class, gcref): + # Before translation, unwraps the RPython instance contained in a _GcRef. + # After translation, it is a type-check performed by the GC. 
+ # Important: the GC only supports calling this function with one Class + # in the whole RPython program (so it can store a single Yes/No bit in + # gctypelayout.py). + if isinstance(gcref._x, Class): + return gcref._x + return None Modified: pypy/branch/gc-module/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/test/test_rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/test/test_rgc.py Mon Sep 6 13:42:11 2010 @@ -153,3 +153,29 @@ assert len(s2.vars) == 3 for i in range(3): assert s2.vars[i] == 50 + i + + +def test_get_objects(): + class X(object): + pass + x1 = X() + lst = rgc._get_objects() + assert rgc.cast_instance_to_gcref(x1) in lst + +def test_get_referents(): + class X(object): + __slots__ = ['stuff'] + x1 = X() + x1.stuff = X() + x2 = X() + lst = rgc._get_referents(rgc.cast_instance_to_gcref(x1)) + lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] + assert x1.stuff in lst2 + assert x2 not in lst2 + +def test_get_memory_usage(): + class X(object): + pass + x1 = X() + n = rgc._get_memory_usage(rgc.cast_instance_to_gcref(x1)) + assert n >= 8 and n <= 64 From arigo at codespeak.net Mon Sep 6 14:17:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 14:17:58 +0200 (CEST) Subject: [pypy-svn] r76893 - pypy/branch/gc-module/pypy/module/gc/test Message-ID: <20100906121758.4E8DF5080B@codespeak.net> Author: arigo Date: Mon Sep 6 14:17:56 2010 New Revision: 76893 Modified: pypy/branch/gc-module/pypy/module/gc/test/test_gc.py Log: Remove the test too. Modified: pypy/branch/gc-module/pypy/module/gc/test/test_gc.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_gc.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_gc.py Mon Sep 6 14:17:56 2010 @@ -59,13 +59,6 @@ raises(ValueError, gc.enable_finalizers) runtest(True) - def test_estimate_heap_size(self): - import sys, gc - if sys.platform == "linux2": - assert gc.estimate_heap_size() > 1024 - else: - raises(RuntimeError, gc.estimate_heap_size) - def test_enable(self): import gc assert gc.isenabled() From arigo at codespeak.net Mon Sep 6 14:18:20 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 14:18:20 +0200 (CEST) Subject: [pypy-svn] r76894 - in pypy/branch/gc-module/pypy: module/gc module/gc/test rlib Message-ID: <20100906121820.299E25080B@codespeak.net> Author: arigo Date: Mon Sep 6 14:18:18 2010 New Revision: 76894 Added: pypy/branch/gc-module/pypy/module/gc/referents.py (contents, props changed) pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (contents, props changed) Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/rlib/rgc.py Log: Expose the interface to app-level. 
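For orientation, a minimal usage sketch of the app-level interface this commit exposes, modelled on the test file added below. It is only expected to work on an interpreter that includes this gc module (e.g. py.py on this branch); a stock CPython gc module has none of these functions, and the exact contents of the returned lists depend on the GC:

    import gc

    x = [2, 3, 4]

    # All reachable objects; entries of type gc.GcRef wrap RPython-level
    # objects that have no app-level identity of their own.
    for obj in gc.get_rpy_objects():
        if obj is x:
            print 'found x'

    # One level of GC references: typically a mix of app-level objects
    # and GcRefs (e.g. the RPython list storing the items of x).
    refs = gc.get_rpy_referents(x)

    # Size of just this object, not counting internal structures.
    print gc.get_rpy_memory_usage(x)

    # Recursive variants that stop at app-level objects.
    print gc.get_referents(x)       # expected to contain 2, 3 and 4
    print gc.get_memory_usage(x)    # total size, in bytes
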
Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Mon Sep 6 14:18:18 2010 @@ -16,6 +16,14 @@ def __init__(self, space, w_name): ts = space.config.translation.type_system - if ts == 'ootype': - del self.interpleveldefs['dump_heap_stats'] + if ts == 'lltype': + self.interpleveldefs.update({ + 'get_rpy_objects': 'referents.get_rpy_objects', + 'get_rpy_referents': 'referents.get_rpy_referents', + 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', + 'get_objects': 'referents.get_objects', + 'get_referents': 'referents.get_referents', + 'get_memory_usage': 'referents.get_memory_usage', + 'GcRef': 'referents.W_GcRef', + }) MixedModule.__init__(self, space, w_name) Added: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- (empty file) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Mon Sep 6 14:18:18 2010 @@ -0,0 +1,100 @@ +from pypy.rlib import rgc +from pypy.interpreter.baseobjspace import W_Root, Wrappable +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import ObjSpace +from pypy.rlib.objectmodel import we_are_translated + + +class W_GcRef(Wrappable): + def __init__(self, gcref): + self.gcref = gcref + +W_GcRef.typedef = TypeDef("GcRef") + + +def wrap(space, gcref): + w_obj = rgc.try_cast_gcref_to_instance(W_Root, gcref) + if w_obj is None: + w_obj = space.wrap(W_GcRef(gcref)) + return w_obj + +def unwrap(space, w_obj): + gcrefobj = space.interpclass_w(w_obj) + if isinstance(gcrefobj, W_GcRef): + gcref = gcrefobj.gcref + else: + gcref = rgc.cast_instance_to_gcref(w_obj) + return gcref + +def get_rpy_objects(space): + """Return a list of all objects (huge). + This contains a lot of GcRefs.""" + result = rgc._get_objects() + return space.newlist([wrap(space, gcref) for gcref in result]) + +def get_rpy_referents(space, w_obj): + """Return a list of all the referents, as reported by the GC. + This is likely to contain a lot of GcRefs.""" + gcref = unwrap(space, w_obj) + lst = rgc._get_referents(gcref) + return space.newlist([wrap(space, gcref) for gcref in lst]) + +def get_rpy_memory_usage(space, w_obj): + """Return the memory usage of just the given object or GcRef. + This does not include the internal structures of the object.""" + gcref = unwrap(space, w_obj) + size = rgc._get_memory_usage(gcref) + return space.wrap(size) + +def get_objects(space): + """Return a list of all app-level objects.""" + result = [] + for gcref in rgc._get_objects(): + w_obj = rgc.try_cast_gcref_to_instance(W_Root, gcref) + if w_obj is not None: + if we_are_translated() or hasattr(w_obj, 'typedef'): + result.append(w_obj) + return space.newlist(result) + +def get_referents(space, args_w): + """Return the list of objects that directly refer to any of objs. 
+ Approximative: follow references recursively until it finds + app-level objects.""" + result = [] + pending = [] + for w_obj in args_w: + pending.append(unwrap(space, w_obj)) + i = 0 + while i < len(pending): + gcref = pending[i] + i += 1 + lst = rgc._get_referents(gcref) + for gcref in lst: + w_subobj = rgc.try_cast_gcref_to_instance(W_Root, gcref) + if w_subobj is not None: + result.append(w_subobj) + elif gcref not in pending: + pending.append(gcref) + return space.newlist(result) +get_referents.unwrap_spec = [ObjSpace, 'args_w'] + +def get_memory_usage(space, args_w): + """Return the total size of the object(s) passed as argument. + Approximative: follow references recursively and compute the + total of the sizes, stopping at other app-level objects.""" + result = 0 + pending = [] + for w_obj in args_w: + pending.append(unwrap(space, w_obj)) + i = 0 + while i < len(pending): + gcref = pending[i] + i += 1 + result += rgc._get_memory_usage(gcref) + lst = rgc._get_referents(gcref) + for gcref in lst: + if (rgc.try_cast_gcref_to_instance(W_Root, gcref) is None + and gcref not in pending): + pending.append(gcref) + return space.wrap(result) +get_memory_usage.unwrap_spec = [ObjSpace, 'args_w'] Added: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- (empty file) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Mon Sep 6 14:18:18 2010 @@ -0,0 +1,74 @@ + + +class AppTestReferents(object): + + def test_get_objects(self): + # XXX this test should be run first, before GcRefs are created. + import gc + x = [2, 3, 4] + lst = gc.get_objects() + for found in lst: + if found is x: + break + else: + assert 0, "'x' not found in get_rpy_objects" + for found in lst: + if type(found) is gc.GcRef: + assert 0, "get_objects() returned a GcRef" + + def test_get_rpy_objects(self): + import gc + x = [2, 3, 4] + lst = gc.get_rpy_objects() + for found in lst: + if found is x: + break + else: + assert 0, "'x' not found in get_rpy_objects" + for found in lst: + if type(found) is gc.GcRef: + break + else: + assert 0, "get_rpy_objects() did not return any GcRef" + + def test_get_rpy_referents(self): + import gc + y = 12345 + x = [y] + lst = gc.get_rpy_referents(x) + # After translation, 'lst' should contain the RPython-level list + # (as a GcStruct). Before translation, the 'wrappeditems' list. + print lst + lst2 = [x for x in lst if type(x) is gc.GcRef] + assert lst2 != [] + # In any case, we should land on 'y' after one or two extra levels + # of indirection. 
+ lst3 = [] + for x in lst2: lst3 += gc.get_rpy_referents(x) + if y not in lst3: + lst4 = [] + for x in lst3: lst4 += gc.get_rpy_referents(x) + if y not in lst4: + assert 0, "does not seem to reach 'y'" + + def test_get_rpy_memory_usage(self): + import gc + n = gc.get_rpy_memory_usage(12345) + print n + assert 4 <= n <= 64 + + def test_get_referents(self): + import gc + y = 12345 + z = 23456 + x = [y, z] + lst = gc.get_referents(x) + assert y in lst and z in lst + + def test_get_memory_usage(self): + import gc + x = [2, 5, 10] + n = gc.get_rpy_memory_usage(x) + m = gc.get_memory_usage(x) + print n, m + assert 4 <= n < m <= 128 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 14:18:18 2010 @@ -321,8 +321,23 @@ return map(_GcRef, lst) def _get_referents(gcref): - lst = gc.get_referents(gcref._x) - return map(_GcRef, lst) + x = gcref._x + if isinstance(x, list): + return map(_GcRef, x) + elif isinstance(x, dict): + return map(_GcRef, x.keys() + x.values()) + else: + if hasattr(x, '__dict__'): + d = map(_GcRef, x.__dict__.values()) + else: + d = [] + if hasattr(type(x), '__slots__'): + for slot in type(x).__slots__: + try: + d.append(_GcRef(getattr(x, slot))) + except AttributeError: + pass + return d def _get_memory_usage(gcref): # approximate implementation using CPython's type info From arigo at codespeak.net Mon Sep 6 14:36:12 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 14:36:12 +0200 (CEST) Subject: [pypy-svn] r76895 - pypy/branch/gc-module/pypy/rlib Message-ID: <20100906123612.020075080B@codespeak.net> Author: arigo Date: Mon Sep 6 14:36:11 2010 New Revision: 76895 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py Log: Add more heuristics about what kind of objects to return when run on py.py. 
Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 14:36:11 2010 @@ -1,4 +1,4 @@ -import gc +import gc, types from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rlib.objectmodel import we_are_translated @@ -318,7 +318,8 @@ def _get_objects(): lst = gc.get_objects() - return map(_GcRef, lst) + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in lst if _keep_object(x)] def _get_referents(gcref): x = gcref._x @@ -327,17 +328,29 @@ elif isinstance(x, dict): return map(_GcRef, x.keys() + x.values()) else: + d = [] if hasattr(x, '__dict__'): - d = map(_GcRef, x.__dict__.values()) - else: - d = [] + d = x.__dict__.values() if hasattr(type(x), '__slots__'): for slot in type(x).__slots__: try: - d.append(_GcRef(getattr(x, slot))) + d.append(getattr(x, slot)) except AttributeError: pass - return d + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in d if _keep_object(x)] + +def _keep_object(x): + if isinstance(x, type) or type(x) is types.ClassType: + return False # don't keep any type + if isinstance(x, (list, dict)): + return True # keep lists and dicts + try: + return not x._freeze_() # don't keep any frozen object + except AttributeError: + return type(x).__module__ != '__builtin__' # keep non-builtins + except Exception: + return False # don't keep objects whose _freeze_() method explodes def _get_memory_usage(gcref): # approximate implementation using CPython's type info From antocuni at codespeak.net Mon Sep 6 15:23:55 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 6 Sep 2010 15:23:55 +0200 (CEST) Subject: [pypy-svn] r76896 - pypy/build/bot2/pypybuildbot Message-ID: <20100906132355.8FB4E282B90@codespeak.net> Author: antocuni Date: Mon Sep 6 15:23:54 2010 New Revision: 76896 Modified: pypy/build/bot2/pypybuildbot/pypylist.py Log: add a link to the proper test summary in the nightly build page Modified: pypy/build/bot2/pypybuildbot/pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/pypylist.py Mon Sep 6 15:23:54 2010 @@ -105,6 +105,15 @@ .even-failed { background-color: #ffbbbb } .odd-failed { background-color: #ff9797 } +.summary_link { + color: black; + text-decoration: none; +} +.summary_link:hover { + color: blue; + text-decoration: underline; +} + .icon { text-align: center } .listing { margin-left: auto; @@ -146,8 +155,8 @@ %(text)s %(size)s %(date)s - %(own_summary)s - %(app_summary)s + %(own_summary)s + %(app_summary)s """ @@ -171,20 +180,21 @@ element['date'] = date.isoformat() t = PyPyTarball(filename) own_builder, app_builder = t.get_builder_names() - own_summary = self._get_summary(own_builder, t.rev) - app_summary = self._get_summary(app_builder, t.rev) - element['own_summary'] = own_summary - element['app_summary'] = app_summary - element['own_summary_class'] = self._get_summary_class(own_summary, rowClass) - element['app_summary_class'] = self._get_summary_class(app_summary, rowClass) + self._add_result_for_builder(element, own_builder, 'own_', t.rev, rowClass) + self._add_result_for_builder(element, app_builder, 'app_', t.rev, rowClass) - def _get_summary_class(self, summary, rowClass): - if summary is None: - return rowClass - elif summary.is_ok(): 
- return rowClass + '-passed' + def _add_result_for_builder(self, element, builder_name, prefix, rev, rowClass): + branch = self._get_branch() + summary, category = self._get_summary_and_category(builder_name, branch, rev) + if branch == 'trunk': + branch = '%3Ctrunk%3E' # + if category: + href = '/summary?category=%s&branch=%s&recentrev=%s' % (category, branch, rev) else: - rowClass + '-failed' + href = '#' + element[prefix + 'summary'] = summary + element[prefix + 'summary_class'] = self._get_summary_class(summary, rowClass) + element[prefix + 'href'] = href def _get_branch(self): parts = self.path.split(os.path.sep) @@ -192,17 +202,25 @@ branch = os.path.sep.join(parts[i+1:]) return branch - def _get_summary(self, builder_name, rev): + def _get_summary_and_category(self, builder_name, branch, rev): try: - branch = self._get_branch() builder = self.status.getBuilder(builder_name) - return builder.summary_by_branch_and_revision[(branch, rev)] + return builder.summary_by_branch_and_revision[(branch, rev)], builder.category except (AttributeError, KeyError): - return None + #return None, None # for testing - ## from pypybuildbot.summary import OutcomeSummary - ## import random - ## if random.choice([True, True, True, False]): - ## return OutcomeSummary(1000, 0, 2, 4) - ## else: - ## return OutcomeSummary(990, 10, 2, 4) + from pypybuildbot.summary import OutcomeSummary + import random + if random.choice([True, True, True, False]): + return OutcomeSummary(1000, 0, 2, 4), None + else: + return OutcomeSummary(990, 10, 2, 4), None + + def _get_summary_class(self, summary, rowClass): + if summary is None: + return rowClass + elif summary.is_ok(): + return rowClass + '-passed' + else: + return rowClass + '-failed' + From antocuni at codespeak.net Mon Sep 6 15:25:41 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 6 Sep 2010 15:25:41 +0200 (CEST) Subject: [pypy-svn] r76897 - pypy/build/bot2/pypybuildbot Message-ID: <20100906132541.CAA84282B90@codespeak.net> Author: antocuni Date: Mon Sep 6 15:25:39 2010 New Revision: 76897 Modified: pypy/build/bot2/pypybuildbot/pypylist.py Log: oups, I checked in this by mistake Modified: pypy/build/bot2/pypybuildbot/pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/pypylist.py Mon Sep 6 15:25:39 2010 @@ -207,14 +207,14 @@ builder = self.status.getBuilder(builder_name) return builder.summary_by_branch_and_revision[(branch, rev)], builder.category except (AttributeError, KeyError): - #return None, None + return None, None # for testing - from pypybuildbot.summary import OutcomeSummary - import random - if random.choice([True, True, True, False]): - return OutcomeSummary(1000, 0, 2, 4), None - else: - return OutcomeSummary(990, 10, 2, 4), None + ## from pypybuildbot.summary import OutcomeSummary + ## import random + ## if random.choice([True, True, True, False]): + ## return OutcomeSummary(1000, 0, 2, 4), None + ## else: + ## return OutcomeSummary(990, 10, 2, 4), None def _get_summary_class(self, summary, rowClass): if summary is None: From cfbolz at codespeak.net Mon Sep 6 16:08:54 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Mon, 6 Sep 2010 16:08:54 +0200 (CEST) Subject: [pypy-svn] r76898 - pypy/extradoc/pypy.org/source/_layouts Message-ID: <20100906140854.F1F76282B90@codespeak.net> Author: cfbolz Date: Mon Sep 6 16:08:53 2010 New Revision: 76898 Modified: 
pypy/extradoc/pypy.org/source/_layouts/page.genshi Log: change donate button to sfc. somebody else will have to regenerate the page. Modified: pypy/extradoc/pypy.org/source/_layouts/page.genshi ============================================================================== --- pypy/extradoc/pypy.org/source/_layouts/page.genshi (original) +++ pypy/extradoc/pypy.org/source/_layouts/page.genshi Mon Sep 6 16:08:53 2010 @@ -13,11 +13,10 @@
  • - - - - + + + +
  • From arigo at codespeak.net Mon Sep 6 16:24:38 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 16:24:38 +0200 (CEST) Subject: [pypy-svn] r76899 - in pypy/extradoc/pypy.org: . source/_layouts Message-ID: <20100906142438.74595282BAD@codespeak.net> Author: arigo Date: Mon Sep 6 16:24:36 2010 New Revision: 76899 Modified: pypy/extradoc/pypy.org/compat.html pypy/extradoc/pypy.org/contact.html pypy/extradoc/pypy.org/download.html pypy/extradoc/pypy.org/features.html pypy/extradoc/pypy.org/index.html pypy/extradoc/pypy.org/source/_layouts/page.genshi Log: Fix the template (it needs to be really XML) and regenerate the html. Modified: pypy/extradoc/pypy.org/compat.html ============================================================================== --- pypy/extradoc/pypy.org/compat.html (original) +++ pypy/extradoc/pypy.org/compat.html Mon Sep 6 16:24:36 2010 @@ -114,7 +114,7 @@
    - +
    Modified: pypy/extradoc/pypy.org/contact.html ============================================================================== --- pypy/extradoc/pypy.org/contact.html (original) +++ pypy/extradoc/pypy.org/contact.html Mon Sep 6 16:24:36 2010 @@ -60,7 +60,7 @@
    - +
    Modified: pypy/extradoc/pypy.org/download.html ============================================================================== --- pypy/extradoc/pypy.org/download.html (original) +++ pypy/extradoc/pypy.org/download.html Mon Sep 6 16:24:36 2010 @@ -231,7 +231,7 @@
    - +
    Modified: pypy/extradoc/pypy.org/features.html ============================================================================== --- pypy/extradoc/pypy.org/features.html (original) +++ pypy/extradoc/pypy.org/features.html Mon Sep 6 16:24:36 2010 @@ -126,7 +126,7 @@
    - +
    Modified: pypy/extradoc/pypy.org/index.html ============================================================================== --- pypy/extradoc/pypy.org/index.html (original) +++ pypy/extradoc/pypy.org/index.html Mon Sep 6 16:24:36 2010 @@ -73,7 +73,7 @@
    - +
    Modified: pypy/extradoc/pypy.org/source/_layouts/page.genshi ============================================================================== --- pypy/extradoc/pypy.org/source/_layouts/page.genshi (original) +++ pypy/extradoc/pypy.org/source/_layouts/page.genshi Mon Sep 6 16:24:36 2010 @@ -13,10 +13,10 @@
  • - - - - + + + +
  • From arigo at codespeak.net Mon Sep 6 18:06:07 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 18:06:07 +0200 (CEST) Subject: [pypy-svn] r76900 - in pypy/branch/gc-module/pypy: module/gc module/gc/test rlib Message-ID: <20100906160607.ED06A282B90@codespeak.net> Author: arigo Date: Mon Sep 6 18:06:06 2010 New Revision: 76900 Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/module/gc/test/test_referents.py pypy/branch/gc-module/pypy/rlib/rgc.py Log: Change the interface in rlib.rgc to something that should be a bit easier on the gc. Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Mon Sep 6 18:06:06 2010 @@ -15,15 +15,13 @@ } def __init__(self, space, w_name): - ts = space.config.translation.type_system - if ts == 'lltype': + if (not space.config.translating or + space.config.translation.gctransformer == "framework"): self.interpleveldefs.update({ - 'get_rpy_objects': 'referents.get_rpy_objects', 'get_rpy_referents': 'referents.get_rpy_referents', 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', 'get_objects': 'referents.get_objects', 'get_referents': 'referents.get_referents', - 'get_memory_usage': 'referents.get_memory_usage', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Mon Sep 6 18:06:06 2010 @@ -12,8 +12,14 @@ W_GcRef.typedef = TypeDef("GcRef") -def wrap(space, gcref): +def try_cast_gcref_to_w_root(gcref): w_obj = rgc.try_cast_gcref_to_instance(W_Root, gcref) + if not we_are_translated() and not hasattr(w_obj, 'typedef'): + w_obj = None + return w_obj + +def wrap(space, gcref): + w_obj = try_cast_gcref_to_w_root(gcref) if w_obj is None: w_obj = space.wrap(W_GcRef(gcref)) return w_obj @@ -26,75 +32,74 @@ gcref = rgc.cast_instance_to_gcref(w_obj) return gcref -def get_rpy_objects(space): - """Return a list of all objects (huge). - This contains a lot of GcRefs.""" - result = rgc._get_objects() - return space.newlist([wrap(space, gcref) for gcref in result]) - def get_rpy_referents(space, w_obj): """Return a list of all the referents, as reported by the GC. This is likely to contain a lot of GcRefs.""" gcref = unwrap(space, w_obj) - lst = rgc._get_referents(gcref) + lst = rgc.get_rpy_referents(gcref) return space.newlist([wrap(space, gcref) for gcref in lst]) def get_rpy_memory_usage(space, w_obj): """Return the memory usage of just the given object or GcRef. This does not include the internal structures of the object.""" gcref = unwrap(space, w_obj) - size = rgc._get_memory_usage(gcref) + size = rgc.get_rpy_memory_usage(gcref) return space.wrap(size) +def _list_w_obj_referents(gcref, result_w): + # Get all W_Root reachable directly from gcref, and add them to + # the list 'result_w'. The logic here is not robust against gc + # moves, and may return the same object several times. 
+ seen = {} # map {current_addr: obj} + pending = [gcref] + i = 0 + while i < len(pending): + gcrefparent = pending[i] + i += 1 + for gcref in rgc.get_rpy_referents(gcrefparent): + key = rgc.cast_gcref_to_int(gcref) + if gcref == seen.get(key, rgc.NULL_GCREF): + continue # already in 'seen' + seen[key] = gcref + w_obj = try_cast_gcref_to_w_root(gcref) + if w_obj is not None: + result_w.append(w_obj) + else: + pending.append(gcref) + def get_objects(space): """Return a list of all app-level objects.""" - result = [] - for gcref in rgc._get_objects(): - w_obj = rgc.try_cast_gcref_to_instance(W_Root, gcref) + roots = rgc.get_rpy_roots() + # start from the roots, which is a list of gcrefs that may or may not + # be W_Roots + pending_w = [] # <- list of W_Roots + for gcref in roots: + w_obj = try_cast_gcref_to_w_root(gcref) if w_obj is not None: - if we_are_translated() or hasattr(w_obj, 'typedef'): - result.append(w_obj) - return space.newlist(result) + pending_w.append(w_obj) + else: + _list_w_obj_referents(gcref, pending_w) + # continue by following every W_Root. Note that this will force a hash + # on every W_Root, which is kind of bad, but not on every RPython object, + # which is really good. + result_w = {} + while len(pending_w) > 0: + previous_w = pending_w + pending_w = [] + for w_obj in previous_w: + if w_obj not in result_w: + result_w[w_obj] = None + gcref = rgc.cast_instance_to_gcref(w_obj) + _list_w_obj_referents(gcref, pending_w) + return space.newlist(result_w.keys()) def get_referents(space, args_w): - """Return the list of objects that directly refer to any of objs. + """Return a list of objects directly referred to by any of the arguments. Approximative: follow references recursively until it finds - app-level objects.""" + app-level objects. May return several times the same object, too.""" result = [] - pending = [] for w_obj in args_w: - pending.append(unwrap(space, w_obj)) - i = 0 - while i < len(pending): - gcref = pending[i] - i += 1 - lst = rgc._get_referents(gcref) - for gcref in lst: - w_subobj = rgc.try_cast_gcref_to_instance(W_Root, gcref) - if w_subobj is not None: - result.append(w_subobj) - elif gcref not in pending: - pending.append(gcref) + gcref = rgc.cast_instance_to_gcref(w_obj) + _list_w_obj_referents(gcref, result) return space.newlist(result) get_referents.unwrap_spec = [ObjSpace, 'args_w'] - -def get_memory_usage(space, args_w): - """Return the total size of the object(s) passed as argument. 
- Approximative: follow references recursively and compute the - total of the sizes, stopping at other app-level objects.""" - result = 0 - pending = [] - for w_obj in args_w: - pending.append(unwrap(space, w_obj)) - i = 0 - while i < len(pending): - gcref = pending[i] - i += 1 - result += rgc._get_memory_usage(gcref) - lst = rgc._get_referents(gcref) - for gcref in lst: - if (rgc.try_cast_gcref_to_instance(W_Root, gcref) is None - and gcref not in pending): - pending.append(gcref) - return space.wrap(result) -get_memory_usage.unwrap_spec = [ObjSpace, 'args_w'] Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Mon Sep 6 18:06:06 2010 @@ -2,35 +2,29 @@ class AppTestReferents(object): + def setup_class(cls): + from pypy.rlib import rgc + cls._backup = [rgc.get_rpy_roots] + w = cls.space.wrap + cls.ALL_ROOTS = [w(4), w([2, 7])] + cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) + rgc.get_rpy_roots = lambda: map(rgc._GcRef, cls.ALL_ROOTS) + + def teardown_class(cls): + from pypy.rlib import rgc + rgc.get_rpy_roots = cls._backup[0] + def test_get_objects(self): - # XXX this test should be run first, before GcRefs are created. import gc - x = [2, 3, 4] lst = gc.get_objects() - for found in lst: - if found is x: - break - else: - assert 0, "'x' not found in get_rpy_objects" - for found in lst: - if type(found) is gc.GcRef: + assert 2 in lst + assert 4 in lst + assert 7 in lst + assert [2, 7] in lst + for x in lst: + if type(x) is gc.GcRef: assert 0, "get_objects() returned a GcRef" - def test_get_rpy_objects(self): - import gc - x = [2, 3, 4] - lst = gc.get_rpy_objects() - for found in lst: - if found is x: - break - else: - assert 0, "'x' not found in get_rpy_objects" - for found in lst: - if type(found) is gc.GcRef: - break - else: - assert 0, "get_rpy_objects() did not return any GcRef" - def test_get_rpy_referents(self): import gc y = 12345 @@ -64,11 +58,3 @@ x = [y, z] lst = gc.get_referents(x) assert y in lst and z in lst - - def test_get_memory_usage(self): - import gc - x = [2, 5, 10] - n = gc.get_rpy_memory_usage(x) - m = gc.get_memory_usage(x) - print n, m - assert 4 <= n < m <= 128 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 18:06:06 2010 @@ -1,6 +1,7 @@ import gc, types from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rlib.objectmodel import we_are_translated +from pypy.rpython.lltypesystem import lltype, llmemory # ____________________________________________________________ # General GC features @@ -93,7 +94,7 @@ def specialize_call(self, hop): from pypy.rpython.error import TyperError - from pypy.rpython.lltypesystem import lltype, llmemory, rtuple + from pypy.rpython.lltypesystem import rtuple from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR @@ -150,7 +151,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() args_v = [] if len(hop.args_s) == 1: @@ -165,7 +165,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype [v_nbytes] = 
hop.inputargs(lltype.Signed) hop.exception_cannot_occur() return hop.genop('gc_set_max_heap_size', [v_nbytes], @@ -182,7 +181,6 @@ return annmodel.SomeBool() def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() return hop.genop('gc_can_move', hop.args_v, resulttype=hop.r_result) @@ -195,11 +193,9 @@ def compute_result_annotation(self): from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.lltypesystem import lltype return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP hop.exception_is_here() return hop.genop('gc_heap_stats', [], resulttype=hop.r_result) @@ -209,7 +205,6 @@ When running directly, will pretend that gc is always moving (might be configurable in a future) """ - from pypy.rpython.lltypesystem import lltype return lltype.nullptr(TP) class MallocNonMovingEntry(ExtRegistryEntry): @@ -221,7 +216,6 @@ return malloc(s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): - from pypy.rpython.lltypesystem import lltype # XXX assume flavor and zero to be None by now assert hop.args_s[0].is_constant() vlist = [hop.inputarg(lltype.Void, arg=0)] @@ -243,7 +237,6 @@ def ll_arraycopy(source, dest, source_start, dest_start, length): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here # supports non-overlapping copies only @@ -279,7 +272,6 @@ def ll_shrink_array(p, smallerlength): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here if llop.shrink_array(lltype.Bool, p, smallerlength): @@ -316,17 +308,17 @@ # ____________________________________________________________ -def _get_objects(): - lst = gc.get_objects() - # discard objects that are too random or that are _freeze_=True - return [_GcRef(x) for x in lst if _keep_object(x)] +def get_rpy_roots(): + # Return the 'roots' from the GC. + # This stub is not usable on top of CPython. 
+ raise NotImplementedError -def _get_referents(gcref): +def get_rpy_referents(gcref): x = gcref._x if isinstance(x, list): - return map(_GcRef, x) + d = x elif isinstance(x, dict): - return map(_GcRef, x.keys() + x.values()) + d = x.keys() + x.values() else: d = [] if hasattr(x, '__dict__'): @@ -337,14 +329,14 @@ d.append(getattr(x, slot)) except AttributeError: pass - # discard objects that are too random or that are _freeze_=True - return [_GcRef(x) for x in d if _keep_object(x)] + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in d if _keep_object(x)] def _keep_object(x): if isinstance(x, type) or type(x) is types.ClassType: return False # don't keep any type - if isinstance(x, (list, dict)): - return True # keep lists and dicts + if isinstance(x, (list, dict, str)): + return True # keep lists and dicts and strings try: return not x._freeze_() # don't keep any frozen object except AttributeError: @@ -352,7 +344,7 @@ except Exception: return False # don't keep objects whose _freeze_() method explodes -def _get_memory_usage(gcref): +def get_rpy_memory_usage(gcref): # approximate implementation using CPython's type info Class = type(gcref._x) size = Class.__basicsize__ @@ -360,6 +352,14 @@ size += Class.__itemsize__ * len(gcref._x) return size +def cast_gcref_to_int(gcref): + if we_are_translated(): + return cast_ptr_to_int(gcref) + else: + return id(gcref._x) + +NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) + class _GcRef(object): # implementation-specific: there should not be any after translation __slots__ = ['_x'] @@ -368,6 +368,10 @@ def __hash__(self): return object.__hash__(self._x) def __eq__(self, other): + if isinstance(other, lltype._ptr): + assert other == NULL_GCREF, ( + "comparing a _GcRef with a non-NULL lltype ptr") + return False assert isinstance(other, _GcRef) return self._x is other._x def __ne__(self, other): @@ -385,9 +389,21 @@ def try_cast_gcref_to_instance(Class, gcref): # Before translation, unwraps the RPython instance contained in a _GcRef. # After translation, it is a type-check performed by the GC. - # Important: the GC only supports calling this function with one Class - # in the whole RPython program (so it can store a single Yes/No bit in - # gctypelayout.py). 
if isinstance(gcref._x, Class): return gcref._x return None + +# ------------------- implementation ------------------- + +def s_list_of_gcrefs(): + from pypy.annotation import model as annmodel + from pypy.annotation.listdef import ListDef + s_gcref = annmodel.SomePtr(llmemory.GCREF) + return annmodel.SomeList(ListDef(None, s_gcref, resized=False)) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_roots + def compute_result_annotation(self): + return s_list_of_gcrefs() + def specialize_call(self, hop): + return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result) From cfbolz at codespeak.net Mon Sep 6 18:52:06 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Mon, 6 Sep 2010 18:52:06 +0200 (CEST) Subject: [pypy-svn] r76901 - pypy/branch/jit-bounds/pypy/jit/metainterp/test Message-ID: <20100906165206.9BA62282B90@codespeak.net> Author: cfbolz Date: Mon Sep 6 18:52:05 2010 New Revision: 76901 Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_basic.py Log: Unfortunately, this test passes nicely Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_basic.py Mon Sep 6 18:52:05 2010 @@ -455,6 +455,31 @@ # the CALL_PURE is constant-folded away by optimizeopt.py self.check_loops(int_sub=1, call=0, call_pure=0) + def test_pure_function_returning_object(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + class V: + def __init__(self, x): + self.x = x + v1 = V(1) + v2 = V(2) + def externfn(x): + if x: + return v1 + else: + return v2 + externfn._pure_function_ = True + def f(n, m): + while n > 0: + myjitdriver.can_enter_jit(n=n, m=m) + myjitdriver.jit_merge_point(n=n, m=m) + m = V(m).x + n -= externfn(m).x + externfn(m + m - m).x + return n + res = self.meta_interp(f, [21, 5]) + assert res == -1 + # the CALL_PURE is constant-folded away by optimizeopt.py + self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=1) + def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) class X(object): From arigo at codespeak.net Mon Sep 6 19:06:54 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 19:06:54 +0200 (CEST) Subject: [pypy-svn] r76902 - in pypy/branch/gc-module/pypy: annotation module/gc module/gc/test rlib rpython rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100906170654.34D4A282B90@codespeak.net> Author: arigo Date: Mon Sep 6 19:06:52 2010 New Revision: 76902 Modified: pypy/branch/gc-module/pypy/annotation/binaryop.py pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/module/gc/test/test_referents.py pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/rpython/rptr.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Implement get_rpy_roots() and get_rpy_referents() in the GC. 
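Because the GC must not allocate while its root walker runs, get_rpy_roots() in the patch below uses a count/preallocate/fill pattern, retrying with more slack when the preallocated list turns out to be too small. A toy, pure-Python model of that pattern (the class and callback here are invented for illustration; the real code walks roots as raw addresses through root_walker.walk_roots() and pads with NULL_GCREFs instead of None):

    class RootCollector(object):
        # walk_roots(callback) is expected to invoke callback(root) once
        # per root; it may report a different number of roots on a later
        # walk, which is what the retry loop below is for.

        def __init__(self, walk_roots):
            self.walk_roots = walk_roots

        def _count_one(self, root):
            self._count += 1

        def _append_one(self, root):
            if self._count >= len(self._list):
                raise ValueError        # preallocated list is too small
            self._list[self._count] = root
            self._count += 1

        def collect(self):
            # pass 1: count without allocating anything per root
            self._count = 0
            self.walk_roots(self._count_one)
            count, extra = self._count, 16
            while True:
                # pass 2: fill a fixed-size list, growing it on failure;
                # trailing None entries mirror the NULL_GCREF padding
                self._list = [None] * (count + extra)
                self._count = 0
                try:
                    self.walk_roots(self._append_one)
                except ValueError:
                    extra *= 3
                else:
                    return self._list

    # example: RootCollector(lambda cb: map(cb, stack_of_roots)).collect()
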
Modified: pypy/branch/gc-module/pypy/annotation/binaryop.py ============================================================================== --- pypy/branch/gc-module/pypy/annotation/binaryop.py (original) +++ pypy/branch/gc-module/pypy/annotation/binaryop.py Mon Sep 6 19:06:52 2010 @@ -594,7 +594,9 @@ getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) + if lst1.listdef.listitem.s_value.contains(s_value): + return # already up-to-date + getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Mon Sep 6 19:06:52 2010 @@ -18,6 +18,7 @@ if (not space.config.translating or space.config.translation.gctransformer == "framework"): self.interpleveldefs.update({ + 'get_rpy_roots': 'referents.get_rpy_roots', 'get_rpy_referents': 'referents.get_rpy_referents', 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', 'get_objects': 'referents.get_objects', Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Mon Sep 6 19:06:52 2010 @@ -32,6 +32,10 @@ gcref = rgc.cast_instance_to_gcref(w_obj) return gcref +def get_rpy_roots(space): + lst = rgc.get_rpy_roots() + return space.newlist([wrap(space, gcref) for gcref in lst if gcref]) + def get_rpy_referents(space, w_obj): """Return a list of all the referents, as reported by the GC. This is likely to contain a lot of GcRefs.""" @@ -74,11 +78,12 @@ # be W_Roots pending_w = [] # <- list of W_Roots for gcref in roots: - w_obj = try_cast_gcref_to_w_root(gcref) - if w_obj is not None: - pending_w.append(w_obj) - else: - _list_w_obj_referents(gcref, pending_w) + if gcref: + w_obj = try_cast_gcref_to_w_root(gcref) + if w_obj is not None: + pending_w.append(w_obj) + else: + _list_w_obj_referents(gcref, pending_w) # continue by following every W_Root. Note that this will force a hash # on every W_Root, which is kind of bad, but not on every RPython object, # which is really good. 
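Put together, get_objects() amounts to the following breadth-first walk (a simplified plain-Python model of the logic in referents.py above; try_cast and get_referents stand in for rgc.try_cast_gcref_to_instance(W_Root, ...) and rgc.get_rpy_referents(), and the real code avoids hashing raw gcrefs by keying them via cast_gcref_to_int, deduplicating only W_Roots):

    def find_app_level_objects(roots, try_cast, get_referents):
        # Start from the GC roots and follow references; every object
        # that casts to a W_Root is collected, and the walk continues
        # through RPython-only objects until it reaches app-level ones.
        result = {}                     # W_Roots found so far
        seen = {}                       # everything already visited
        pending = [r for r in roots if r is not None]   # skip NULLs
        while pending:
            obj = pending.pop()
            if id(obj) in seen:
                continue
            seen[id(obj)] = obj         # keep it alive while walking
            w_obj = try_cast(obj)
            if w_obj is not None:
                result[w_obj] = None
            pending.extend(get_referents(obj))
        return result.keys()
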
Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Mon Sep 6 19:06:52 2010 @@ -6,9 +6,12 @@ from pypy.rlib import rgc cls._backup = [rgc.get_rpy_roots] w = cls.space.wrap - cls.ALL_ROOTS = [w(4), w([2, 7])] + class RandomRPythonObject(object): + pass + cls.ALL_ROOTS = [w(4), w([2, 7]), RandomRPythonObject()] cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) - rgc.get_rpy_roots = lambda: map(rgc._GcRef, cls.ALL_ROOTS) + rgc.get_rpy_roots = lambda: ( + map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*17) def teardown_class(cls): from pypy.rlib import rgc @@ -25,6 +28,14 @@ if type(x) is gc.GcRef: assert 0, "get_objects() returned a GcRef" + def test_get_rpy_roots(self): + import gc + lst = gc.get_rpy_roots() + assert lst[0] == 4 + assert lst[1] == [2, 7] + assert type(lst[2]) is gc.GcRef + assert len(lst) == 3 + def test_get_rpy_referents(self): import gc y = 12345 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 19:06:52 2010 @@ -311,6 +311,7 @@ def get_rpy_roots(): # Return the 'roots' from the GC. # This stub is not usable on top of CPython. + # The gc typically returns a list that ends with a few NULL_GCREFs. raise NotImplementedError def get_rpy_referents(gcref): @@ -395,11 +396,17 @@ # ------------------- implementation ------------------- +_cache_s_list_of_gcrefs = None + def s_list_of_gcrefs(): - from pypy.annotation import model as annmodel - from pypy.annotation.listdef import ListDef - s_gcref = annmodel.SomePtr(llmemory.GCREF) - return annmodel.SomeList(ListDef(None, s_gcref, resized=False)) + global _cache_s_list_of_gcrefs + if _cache_s_list_of_gcrefs is None: + from pypy.annotation import model as annmodel + from pypy.annotation.listdef import ListDef + s_gcref = annmodel.SomePtr(llmemory.GCREF) + _cache_s_list_of_gcrefs = annmodel.SomeList( + ListDef(None, s_gcref, resized=False)) + return _cache_s_list_of_gcrefs class Entry(ExtRegistryEntry): _about_ = get_rpy_roots @@ -407,3 +414,14 @@ return s_list_of_gcrefs() def specialize_call(self, hop): return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + return s_list_of_gcrefs() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_referents', vlist, + resulttype = hop.r_result) Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Mon Sep 6 19:06:52 2010 @@ -255,6 +255,84 @@ finally: self.finalizer_lock_count -= 1 + # ---------- implementation of pypy.rlib.rgc.get_rpy_roots() ---------- + + def _counting_rpy_root(self, root): + self._count_rpy += 1 + + def _do_count_rpy_roots(self): + self._count_rpy = 0 + self.root_walker.walk_roots( + GCBase._counting_rpy_root, + GCBase._counting_rpy_root, + GCBase._counting_rpy_root) + 
return self._count_rpy + + def _append_rpy_root(self, root): + # Can use the gc list, but should not allocate! + # It is essential that the list is not resizable! + lst = self._list_rpy + index = self._count_rpy + if index >= len(lst): + raise ValueError + self._count_rpy = index + 1 + lst[index] = llmemory.cast_adr_to_ptr(root.address[0], llmemory.GCREF) + + def _do_append_rpy_roots(self, lst): + self._count_rpy = 0 + self._list_rpy = lst + self.root_walker.walk_roots( + GCBase._append_rpy_root, + GCBase._append_rpy_root, + GCBase._append_rpy_root) + self._list_rpy = None + + def get_rpy_roots(self): + count = self._do_count_rpy_roots() + extra = 16 + while True: + result = [lltype.nullptr(llmemory.GCREF.TO)] * (count + extra) + try: + self._do_append_rpy_roots(result) + except ValueError: + extra *= 3 + else: + return result + + # ---------- implementation of pypy.rlib.rgc.get_rpy_referents() ---------- + + def _count_rpy_referent(self, pointer, _): + self._count_rpy += 1 + + def _do_count_rpy_referents(self, gcref): + self._count_rpy = 0 + self.trace(llmemory.cast_ptr_to_adr(gcref), + self._count_rpy_referent, None) + return self._count_rpy + + def _append_rpy_referent(self, pointer, _): + # Can use the gc list, but should not allocate! + # It is essential that the list is not resizable! + lst = self._list_rpy + index = self._count_rpy + if index >= len(lst): + raise ValueError + self._count_rpy = index + 1 + lst[index] = llmemory.cast_adr_to_ptr(pointer.address[0], + llmemory.GCREF) + + def _do_append_rpy_referents(self, gcref, lst): + self._count_rpy = 0 + self._list_rpy = lst + self.trace(llmemory.cast_ptr_to_adr(gcref), + self._append_rpy_referent, None) + + def get_rpy_referents(self, gcref): + count = self._do_count_rpy_referents(gcref) + result = [lltype.nullptr(llmemory.GCREF.TO)] * count + self._do_append_rpy_referents(gcref, result) + return result + class MovingGCBase(GCBase): moving_gc = True Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Mon Sep 6 19:06:52 2010 @@ -7,7 +7,7 @@ from pypy.rpython.memory.gc import marksweep from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib import rstack +from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc @@ -388,6 +388,15 @@ else: self.id_ptr = None + self.get_rpy_roots_ptr = getfn(GCClass.get_rpy_roots.im_func, + [s_gc], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_referents_ptr = getfn(GCClass.get_rpy_referents.im_func, + [s_gc, s_gcref], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, annmodel.SomeInteger(nonneg=True)], @@ -883,6 +892,21 @@ def gct_gc_get_type_info_group(self, hop): return hop.cast_result(self.c_type_info_group) + def gct_gc_get_rpy_roots(self, hop): + livevars = self.push_roots(hop) + hop.genop("direct_call", + [self.get_rpy_roots_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_referents(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + 
[self.get_rpy_referents_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): Modified: pypy/branch/gc-module/pypy/rpython/rptr.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/rptr.py (original) +++ pypy/branch/gc-module/pypy/rpython/rptr.py Mon Sep 6 19:06:52 2010 @@ -35,6 +35,9 @@ id = lltype.cast_ptr_to_int(p) return ll_str.ll_int2hex(r_uint(id), True) + def get_ll_eq_function(self): + return None + def rtype_getattr(self, hop): attr = hop.args_s[1].const if isinstance(hop.s_result, annmodel.SomeLLADTMeth): Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Mon Sep 6 19:06:52 2010 @@ -2,7 +2,7 @@ import sys, os, inspect from pypy.objspace.flow.model import summary -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.test import snippet from pypy.rlib import rgc @@ -23,6 +23,7 @@ GC_CAN_SHRINK_ARRAY = False _isolated_func = None + c_allfuncs = None @classmethod def _makefunc_str_int(cls, f): @@ -891,6 +892,56 @@ def test_arraycopy_writebarrier_ptr(self): self.run("arraycopy_writebarrier_ptr") + def define_get_rpy_roots(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def g(s): + lst = rgc.get_rpy_roots() + found = False + for x in lst: + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s): + found = True + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s.u): + os.write(2, "s.u should not be found!\n") + assert False + return found == 1 + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + found = g(s) + if not found: + os.write(2, "not found!\n") + assert False + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_roots(self): + self.run("get_rpy_roots") + + def define_get_rpy_referents(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + lst = rgc.get_rpy_referents(gcref1) + assert gcref2 in lst + assert gcref1 not in lst + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_referents(self): + self.run("get_rpy_referents") + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From arigo at codespeak.net Mon Sep 6 19:12:11 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 19:12:11 +0200 (CEST) Subject: [pypy-svn] r76903 - pypy/branch/gc-module/pypy/annotation Message-ID: <20100906171211.7583C282B90@codespeak.net> Author: arigo Date: Mon Sep 6 19:12:09 2010 New Revision: 76903 Modified: pypy/branch/gc-module/pypy/annotation/binaryop.py Log: Oups! That was bogus. 
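The removed early return is what made this bogus: when the stored value was already covered by the list's item annotation, setitem() bailed out before ever reaching lst1.listdef.mutate(), so a list that is only filled with already-general values (exactly what _append_rpy_root() above does with the preallocated GCREF list) was presumably never flagged as mutated. A schematic, plain-Python illustration of that usage pattern -- produce() and n are placeholders for illustration only, not translator code:

    def produce(i):            # stand-in for the real per-item computation
        return i

    n = 5
    lst = [None] * n           # built at a fixed size and never resized
    for i in range(n):
        lst[i] = produce(i)    # each store must still mark the ListDef as mutated

r76904 just below sidesteps the problem by constructing the cached ListDef with mutated=True up front, instead of relying on the annotator witnessing such a setitem.
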
Modified: pypy/branch/gc-module/pypy/annotation/binaryop.py ============================================================================== --- pypy/branch/gc-module/pypy/annotation/binaryop.py (original) +++ pypy/branch/gc-module/pypy/annotation/binaryop.py Mon Sep 6 19:12:09 2010 @@ -594,9 +594,7 @@ getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - if lst1.listdef.listitem.s_value.contains(s_value): - return # already up-to-date - getbookkeeper().count("list_setitem", int2) + getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] From arigo at codespeak.net Mon Sep 6 19:13:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 19:13:44 +0200 (CEST) Subject: [pypy-svn] r76904 - pypy/branch/gc-module/pypy/rlib Message-ID: <20100906171344.6A24F282B90@codespeak.net> Author: arigo Date: Mon Sep 6 19:13:42 2010 New Revision: 76904 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py Log: That's the proper fix. Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 19:13:42 2010 @@ -405,7 +405,7 @@ from pypy.annotation.listdef import ListDef s_gcref = annmodel.SomePtr(llmemory.GCREF) _cache_s_list_of_gcrefs = annmodel.SomeList( - ListDef(None, s_gcref, resized=False)) + ListDef(None, s_gcref, mutated=True, resized=False)) return _cache_s_list_of_gcrefs class Entry(ExtRegistryEntry): From arigo at codespeak.net Mon Sep 6 19:59:47 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 6 Sep 2010 19:59:47 +0200 (CEST) Subject: [pypy-svn] r76905 - in pypy/branch/gc-module/pypy: rlib rpython/memory rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100906175947.B11C6282B90@codespeak.net> Author: arigo Date: Mon Sep 6 19:59:45 2010 New Revision: 76905 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/rpython/memory/gctypelayout.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Implement translation of try_cast_gcref_to_instance() using an internal helper, rgc._is_rpy_instance(), whose answer is stored as a flag in the GC type layout. Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Mon Sep 6 19:59:45 2010 @@ -309,12 +309,14 @@ # ____________________________________________________________ def get_rpy_roots(): + "NOT_RPYTHON" # Return the 'roots' from the GC. # This stub is not usable on top of CPython. # The gc typically returns a list that ends with a few NULL_GCREFs. raise NotImplementedError def get_rpy_referents(gcref): + "NOT_RPYTHON" x = gcref._x if isinstance(x, list): d = x @@ -346,6 +348,7 @@ return False # don't keep objects whose _freeze_() method explodes def get_rpy_memory_usage(gcref): + "NOT_RPYTHON" # approximate implementation using CPython's type info Class = type(gcref._x) size = Class.__basicsize__ @@ -385,14 +388,32 @@ def cast_instance_to_gcref(x): # Before translation, casts an RPython instance into a _GcRef. # After translation, it is a variant of cast_object_to_ptr(GCREF). 
- return _GcRef(x) + if we_are_translated(): + from pypy.rpython import annlowlevel + x = annlowlevel.cast_instance_to_base_ptr(x) + return lltype.cast_opaque_ptr(llmemory.GCREF, x) + else: + return _GcRef(x) +cast_instance_to_gcref._annspecialcase_ = 'specialize:argtype(0)' def try_cast_gcref_to_instance(Class, gcref): # Before translation, unwraps the RPython instance contained in a _GcRef. # After translation, it is a type-check performed by the GC. - if isinstance(gcref._x, Class): - return gcref._x - return None + if we_are_translated(): + from pypy.rpython.annlowlevel import base_ptr_lltype + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.lltypesystem import rclass + if _is_rpy_instance(gcref): + objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref) + clsptr = _get_llcls_from_cls(Class) + if rclass.ll_isinstance(objptr, clsptr): + return cast_base_ptr_to_instance(Class, objptr) + return None + else: + if isinstance(gcref._x, Class): + return gcref._x + return None +try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' # ------------------- implementation ------------------- @@ -425,3 +446,39 @@ vlist = hop.inputargs(hop.args_r[0]) return hop.genop('gc_get_rpy_referents', vlist, resulttype = hop.r_result) + +def _is_rpy_instance(gcref): + "NOT_RPYTHON" + raise NotImplementedError + +def _get_llcls_from_cls(Class): + "NOT_RPYTHON" + raise NotImplementedError + +class Entry(ExtRegistryEntry): + _about_ = _is_rpy_instance + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeBool() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_is_rpy_instance', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = _get_llcls_from_cls + def compute_result_annotation(self, s_Class): + from pypy.annotation import model as annmodel + from pypy.rpython.lltypesystem import rclass + assert s_Class.is_constant() + return annmodel.SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): + from pypy.rpython.rclass import getclassrepr + from pypy.objspace.flow.model import Constant + from pypy.rpython.lltypesystem import rclass + Class = hop.args_s[0].const + classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class) + classrepr = getclassrepr(hop.rtyper, classdef) + vtable = classrepr.getvtable() + assert lltype.typeOf(vtable) == rclass.CLASSTYPE + return Constant(vtable, concretetype=rclass.CLASSTYPE) Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Mon Sep 6 19:59:45 2010 @@ -53,7 +53,8 @@ varsize_offset_to_length, varsize_offsets_to_gcpointers_in_var_part, weakpointer_offset, - member_index): + member_index, + is_rpython_class): self.getfinalizer = getfinalizer self.is_varsize = is_varsize self.has_gcptr_in_varsize = has_gcptr_in_varsize @@ -66,6 +67,7 @@ self.varsize_offsets_to_gcpointers_in_var_part = varsize_offsets_to_gcpointers_in_var_part self.weakpointer_offset = weakpointer_offset self.member_index = member_index + self.is_rpython_class = is_rpython_class def get_member_index(self, type_id): return self.member_index(type_id) @@ -333,6 +335,12 @@ self._do_append_rpy_referents(gcref, result) return result + # ---------- + + def is_rpy_instance(self, gcref): + typeid = 
self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) + return self.is_rpython_class(typeid) + class MovingGCBase(GCBase): moving_gc = True Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Mon Sep 6 19:59:45 2010 @@ -396,6 +396,10 @@ [s_gc, s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) + self.is_rpy_instance_ptr = getfn(GCClass.is_rpy_instance.im_func, + [s_gc, s_gcref], + annmodel.SomeBool(), + minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, @@ -907,6 +911,14 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_is_rpy_instance(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.is_rpy_instance_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): Modified: pypy/branch/gc-module/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctypelayout.py Mon Sep 6 19:59:45 2010 @@ -100,6 +100,10 @@ infobits = self.get(typeid).infobits return infobits & T_MEMBER_INDEX + def q_is_rpython_class(self, typeid): + infobits = self.get(typeid).infobits + return infobits & T_IS_RPYTHON_INSTANCE != 0 + def set_query_functions(self, gc): gc.set_query_functions( self.q_is_varsize, @@ -113,7 +117,8 @@ self.q_varsize_offset_to_length, self.q_varsize_offsets_to_gcpointers_in_var_part, self.q_weakpointer_offset, - self.q_member_index) + self.q_member_index, + self.q_is_rpython_class) # the lowest 16bits are used to store group member index @@ -122,6 +127,7 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT T_KEY_MASK = 0xFF000000 T_KEY_VALUE = 0x7A000000 # bug detection only @@ -180,6 +186,8 @@ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF + if is_subclass_of_object(TYPE): + infobits |= T_IS_RPYTHON_INSTANCE info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -258,9 +266,7 @@ else: # no vtable from lltype2vtable -- double-check to be sure # that it's not a subclass of OBJECT. 
- while isinstance(TYPE, lltype.GcStruct): - assert TYPE is not rclass.OBJECT - _, TYPE = TYPE._first_struct() + assert not is_subclass_of_object(TYPE) def get_info(self, type_id): res = llop.get_group_member(GCData.TYPE_INFO_PTR, @@ -436,6 +442,13 @@ for i in range(p._obj.getlength()): zero_gc_pointers_inside(p[i], ITEM) +def is_subclass_of_object(TYPE): + while isinstance(TYPE, lltype.GcStruct): + if TYPE is rclass.OBJECT: + return True + _, TYPE = TYPE._first_struct() + return False + ########## weakrefs ########## # framework: weakref objects are small structures containing only an address Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Mon Sep 6 19:59:45 2010 @@ -942,6 +942,66 @@ def test_get_rpy_referents(self): self.run("get_rpy_referents") + def define_is_rpy_instance(self): + class Foo: + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def check(gcref, expected): + result = rgc._is_rpy_instance(gcref) + assert result == expected + + def fn(): + s = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + check(gcref1, False) + + f = Foo() + gcref3 = rgc.cast_instance_to_gcref(f) + check(gcref3, True) + + return 0 + + return fn + + def test_is_rpy_instance(self): + self.run("is_rpy_instance") + + def define_try_cast_gcref_to_instance(self): + class Foo: + pass + class FooBar(Foo): + pass + class Biz(object): + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def fn(): + foo = Foo() + gcref1 = rgc.cast_instance_to_gcref(foo) + assert rgc.try_cast_gcref_to_instance(Foo, gcref1) is foo + assert rgc.try_cast_gcref_to_instance(FooBar, gcref1) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref1) is None + + foobar = FooBar() + gcref2 = rgc.cast_instance_to_gcref(foobar) + assert rgc.try_cast_gcref_to_instance(Foo, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(FooBar, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(Biz, gcref2) is None + + s = lltype.malloc(S) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + assert rgc.try_cast_gcref_to_instance(Foo, gcref3) is None + assert rgc.try_cast_gcref_to_instance(FooBar, gcref3) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref3) is None + + return 0 + + return fn + + def test_try_cast_gcref_to_instance(self): + self.run("try_cast_gcref_to_instance") + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From antocuni at codespeak.net Mon Sep 6 21:29:36 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 6 Sep 2010 21:29:36 +0200 (CEST) Subject: [pypy-svn] r76906 - pypy/build/bot2/pypybuildbot Message-ID: <20100906192936.9F78E282B90@codespeak.net> Author: antocuni Date: Mon Sep 6 21:29:31 2010 New Revision: 76906 Modified: pypy/build/bot2/pypybuildbot/pypylist.py Log: don't put a dummy link around "None"s Modified: pypy/build/bot2/pypybuildbot/pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/pypylist.py Mon Sep 6 21:29:31 2010 @@ -155,8 +155,8 @@ %(text)s %(size)s %(date)s - %(own_summary)s - %(app_summary)s + %(own_summary)s + %(app_summary)s """ @@ -190,11 +190,11 @@ branch = '%3Ctrunk%3E' # if category: href = 
'/summary?category=%s&branch=%s&recentrev=%s' % (category, branch, rev) + str_summary = '%s' % (href, summary) else: - href = '#' - element[prefix + 'summary'] = summary + str_summary = str(summary) + element[prefix + 'summary'] = str_summary element[prefix + 'summary_class'] = self._get_summary_class(summary, rowClass) - element[prefix + 'href'] = href def _get_branch(self): parts = self.path.split(os.path.sep) From hakanardo at codespeak.net Tue Sep 7 07:25:16 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Tue, 7 Sep 2010 07:25:16 +0200 (CEST) Subject: [pypy-svn] r76907 - pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt Message-ID: <20100907052516.449B8282B90@codespeak.net> Author: hakanardo Date: Tue Sep 7 07:25:13 2010 New Revision: 76907 Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Log: moved some optimizations infront of the heap optimizer Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py Tue Sep 7 07:25:13 2010 @@ -416,89 +416,9 @@ # in this case self.emit_operation(op) - def _optimize_nullness(self, op, box, expect_nonnull): - value = self.getvalue(box) - if value.is_nonnull(): - self.make_constant_int(op.result, expect_nonnull) - elif value.is_null(): - self.make_constant_int(op.result, not expect_nonnull) - else: - self.optimize_default(op) - - def optimize_INT_IS_TRUE(self, op): - if self.getvalue(op.args[0]) in self.bool_boxes: - self.make_equal_to(op.result, self.getvalue(op.args[0])) - return - self._optimize_nullness(op, op.args[0], True) - - def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.args[0], False) - - def _optimize_oois_ooisnot(self, op, expect_isnot): - value0 = self.getvalue(op.args[0]) - value1 = self.getvalue(op.args[1]) - if value0.is_virtual(): - if value1.is_virtual(): - intres = (value0 is value1) ^ expect_isnot - self.make_constant_int(op.result, intres) - else: - self.make_constant_int(op.result, expect_isnot) - elif value1.is_virtual(): - self.make_constant_int(op.result, expect_isnot) - elif value1.is_null(): - self._optimize_nullness(op, op.args[0], expect_isnot) - elif value0.is_null(): - self._optimize_nullness(op, op.args[1], expect_isnot) - elif value0 is value1: - self.make_constant_int(op.result, not expect_isnot) - else: - cls0 = value0.get_constant_class(self.cpu) - if cls0 is not None: - cls1 = value1.get_constant_class(self.cpu) - if cls1 is not None and not cls0.same_constant(cls1): - # cannot be the same object, as we know that their - # class is different - self.make_constant_int(op.result, expect_isnot) - return - self.optimize_default(op) - - def optimize_PTR_NE(self, op): - self._optimize_oois_ooisnot(op, True) - - def optimize_PTR_EQ(self, op): - self._optimize_oois_ooisnot(op, False) - - def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) - realclassbox = value.get_constant_class(self.cpu) - if realclassbox is not None: - checkclassbox = self.cpu.typedescr2classbox(op.descr) - result = self.cpu.ts.subclassOf(self.cpu, realclassbox, - checkclassbox) - self.make_constant_int(op.result, result) - return - self.emit_operation(op) - def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) - def 
optimize_CALL_LOOPINVARIANT(self, op): - funcvalue = self.getvalue(op.args[0]) - if not funcvalue.is_constant(): - self.optimize_default(op) - return - key = make_hashable_int(op.args[0].getint()) - resvalue = self.loop_invariant_results.get(key, None) - if resvalue is not None: - self.make_equal_to(op.result, resvalue) - return - # change the op to be a normal call, from the backend's point of view - # there is no reason to have a separate operation for this - op.opnum = rop.CALL - self.optimize_default(op) - resvalue = self.getvalue(op.result) - self.loop_invariant_results[key] = resvalue - optimize_ops = _findall(Optimizer, 'optimize_') Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Tue Sep 7 07:25:13 2010 @@ -238,6 +238,87 @@ self.emit_operation(op) self.optimizer.exception_might_have_happened = False + def optimize_CALL_LOOPINVARIANT(self, op): + funcvalue = self.getvalue(op.args[0]) + if not funcvalue.is_constant(): + self.emit_operation(op) + return + key = make_hashable_int(op.args[0].getint()) + resvalue = self.optimizer.loop_invariant_results.get(key, None) + if resvalue is not None: + self.make_equal_to(op.result, resvalue) + return + # change the op to be a normal call, from the backend's point of view + # there is no reason to have a separate operation for this + op.opnum = rop.CALL + self.emit_operation(op) + resvalue = self.getvalue(op.result) + self.optimizer.loop_invariant_results[key] = resvalue + + def _optimize_nullness(self, op, box, expect_nonnull): + value = self.getvalue(box) + if value.is_nonnull(): + self.make_constant_int(op.result, expect_nonnull) + elif value.is_null(): + self.make_constant_int(op.result, not expect_nonnull) + else: + self.emit_operation(op) + + def optimize_INT_IS_TRUE(self, op): + if self.getvalue(op.args[0]) in self.optimizer.bool_boxes: + self.make_equal_to(op.result, self.getvalue(op.args[0])) + return + self._optimize_nullness(op, op.args[0], True) + + def optimize_INT_IS_ZERO(self, op): + self._optimize_nullness(op, op.args[0], False) + + def _optimize_oois_ooisnot(self, op, expect_isnot): + value0 = self.getvalue(op.args[0]) + value1 = self.getvalue(op.args[1]) + if value0.is_virtual(): + if value1.is_virtual(): + intres = (value0 is value1) ^ expect_isnot + self.make_constant_int(op.result, intres) + else: + self.make_constant_int(op.result, expect_isnot) + elif value1.is_virtual(): + self.make_constant_int(op.result, expect_isnot) + elif value1.is_null(): + self._optimize_nullness(op, op.args[0], expect_isnot) + elif value0.is_null(): + self._optimize_nullness(op, op.args[1], expect_isnot) + elif value0 is value1: + self.make_constant_int(op.result, not expect_isnot) + else: + cls0 = value0.get_constant_class(self.optimizer.cpu) + if cls0 is not None: + cls1 = value1.get_constant_class(self.optimizer.cpu) + if cls1 is not None and not cls0.same_constant(cls1): + # cannot be the same object, as we know that their + # class is different + self.make_constant_int(op.result, expect_isnot) + return + self.emit_operation(op) + + def optimize_PTR_NE(self, op): + self._optimize_oois_ooisnot(op, True) + + def optimize_PTR_EQ(self, op): + self._optimize_oois_ooisnot(op, False) + + def optimize_INSTANCEOF(self, op): + value = self.getvalue(op.args[0]) + realclassbox = 
value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) + result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, + realclassbox, + checkclassbox) + self.make_constant_int(op.result, result) + return + self.emit_operation(op) + optimize_ops = _findall(OptRewrite, 'optimize_') From arigo at codespeak.net Tue Sep 7 11:13:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 11:13:58 +0200 (CEST) Subject: [pypy-svn] r76908 - in pypy/branch/gc-module/pypy: rlib rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100907091358.64E9B282B90@codespeak.net> Author: arigo Date: Tue Sep 7 11:13:56 2010 New Revision: 76908 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Translation of get_rpy_memory_usage(). Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Tue Sep 7 11:13:56 2010 @@ -356,6 +356,12 @@ size += Class.__itemsize__ * len(gcref._x) return size +def get_rpy_typeid(gcref): + "NOT_RPYTHON" + from pypy.rlib.rarithmetic import intmask + Class = gcref._x.__class__ + return intmask(id(Class)) + def cast_gcref_to_int(gcref): if we_are_translated(): return cast_ptr_to_int(gcref) @@ -447,6 +453,26 @@ return hop.genop('gc_get_rpy_referents', vlist, resulttype = hop.r_result) +class Entry(ExtRegistryEntry): + _about_ = get_rpy_memory_usage + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_memory_usage', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_typeid + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_typeid', vlist, + resulttype = hop.r_result) + def _is_rpy_instance(gcref): "NOT_RPYTHON" raise NotImplementedError Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Tue Sep 7 11:13:56 2010 @@ -337,6 +337,10 @@ # ---------- + def get_rpy_memory_usage(self, gcref): + # overridden in semispace.py and markcompact.py to also count the hash + return self.get_size(llmemory.cast_ptr_to_adr(gcref)) + def is_rpy_instance(self, gcref): typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) return self.is_rpython_class(typeid) Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py Tue Sep 7 11:13:56 2010 @@ -674,6 +674,14 @@ return 
llmemory.cast_adr_to_int(obj) # not in an arena... return adr - self.space + def get_rpy_memory_usage(self, gcref): + obj = llmemory.cast_ptr_to_adr(gcref) + size = self.get_size(obj) + hdr = self.header(obj) + if hdr.tid & GCFLAG_HASHFIELD: + size += llmemory.sizeof(lltype.Signed) + return size + # ____________________________________________________________ class CannotAllocateGCArena(Exception): Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py Tue Sep 7 11:13:56 2010 @@ -331,6 +331,9 @@ size += llmemory.sizeof(lltype.Signed) return size + def get_rpy_memory_usage(self, gcref): + return self.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref)) + def scan_copied(self, scan): while scan < self.free: curr = scan + self.size_gc_header() Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Tue Sep 7 11:13:56 2010 @@ -396,6 +396,11 @@ [s_gc, s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) + self.get_rpy_memory_usage_ptr = getfn( + GCClass.get_rpy_memory_usage.im_func, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) self.is_rpy_instance_ptr = getfn(GCClass.is_rpy_instance.im_func, [s_gc, s_gcref], annmodel.SomeBool(), @@ -911,6 +916,14 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_get_rpy_memory_usage(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_gc_is_rpy_instance(self, hop): livevars = self.push_roots(hop) [v_ptr] = hop.spaceop.args Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Tue Sep 7 11:13:56 2010 @@ -1002,6 +1002,38 @@ def test_try_cast_gcref_to_instance(self): self.run("try_cast_gcref_to_instance") + def define_get_rpy_memory_usage(self): + U = lltype.GcStruct('U', ('x1', lltype.Signed), + ('x2', lltype.Signed), + ('x3', lltype.Signed), + ('x4', lltype.Signed), + ('x5', lltype.Signed), + ('x6', lltype.Signed), + ('x7', lltype.Signed), + ('x8', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_memory_usage(gcref1) + assert 8 <= int1 <= 32 + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_memory_usage(gcref2) + assert 4*9 <= int2 <= 8*12 + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_memory_usage(gcref3) + assert 4*1001 <= int3 <= 8*1010 + return 0 + + return fn + + def test_get_rpy_memory_usage(self): + self.run("get_rpy_memory_usage") + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From arigo at codespeak.net 
Tue Sep 7 11:29:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 11:29:06 +0200 (CEST) Subject: [pypy-svn] r76909 - in pypy/branch/gc-module/pypy: rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100907092906.89F8E282B90@codespeak.net> Author: arigo Date: Tue Sep 7 11:29:04 2010 New Revision: 76909 Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Translate rgc.get_rpy_typeid(). Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Tue Sep 7 11:29:04 2010 @@ -341,6 +341,11 @@ # overridden in semispace.py and markcompact.py to also count the hash return self.get_size(llmemory.cast_ptr_to_adr(gcref)) + def get_rpy_typeid(self, gcref): + from pypy.rlib.rarithmetic import intmask + typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) + return intmask(typeid) + def is_rpy_instance(self, gcref): typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) return self.is_rpython_class(typeid) Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Tue Sep 7 11:29:04 2010 @@ -401,6 +401,10 @@ [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) + self.get_rpy_typeid_ptr = getfn(GCClass.get_rpy_typeid.im_func, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) self.is_rpy_instance_ptr = getfn(GCClass.is_rpy_instance.im_func, [s_gc, s_gcref], annmodel.SomeBool(), @@ -924,6 +928,14 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_get_rpy_typeid(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_typeid_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_gc_is_rpy_instance(self, hop): livevars = self.push_roots(hop) [v_ptr] = hop.spaceop.args Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Tue Sep 7 11:29:04 2010 @@ -1034,6 +1034,35 @@ def test_get_rpy_memory_usage(self): self.run("get_rpy_memory_usage") + def define_get_rpy_typeid(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_typeid(gcref1) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_typeid(gcref2) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_typeid(gcref3) + gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + int4 = rgc.get_rpy_typeid(gcref4) + assert int1 != int2 + assert int1 != int3 + assert int2 != int3 + assert int1 == int4 + 
return 0 + + return fn + + def test_get_rpy_typeid(self): + self.run("get_rpy_typeid") + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From arigo at codespeak.net Tue Sep 7 11:33:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 11:33:13 +0200 (CEST) Subject: [pypy-svn] r76910 - in pypy/branch/gc-module/pypy: rlib rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100907093313.E6361282B90@codespeak.net> Author: arigo Date: Tue Sep 7 11:33:11 2010 New Revision: 76910 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Change rgc.get_rpy_typeid() into rgc.get_rpy_type_index(), which returns the "member index" in the gctypelayout table. More useful for accessing typeids.txt. Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Tue Sep 7 11:33:11 2010 @@ -356,7 +356,7 @@ size += Class.__itemsize__ * len(gcref._x) return size -def get_rpy_typeid(gcref): +def get_rpy_type_index(gcref): "NOT_RPYTHON" from pypy.rlib.rarithmetic import intmask Class = gcref._x.__class__ @@ -464,13 +464,13 @@ resulttype = hop.r_result) class Entry(ExtRegistryEntry): - _about_ = get_rpy_typeid + _about_ = get_rpy_type_index def compute_result_annotation(self, s_gcref): from pypy.annotation import model as annmodel return annmodel.SomeInteger() def specialize_call(self, hop): vlist = hop.inputargs(hop.args_r[0]) - return hop.genop('gc_get_rpy_typeid', vlist, + return hop.genop('gc_get_rpy_type_index', vlist, resulttype = hop.r_result) def _is_rpy_instance(gcref): Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Tue Sep 7 11:33:11 2010 @@ -341,10 +341,10 @@ # overridden in semispace.py and markcompact.py to also count the hash return self.get_size(llmemory.cast_ptr_to_adr(gcref)) - def get_rpy_typeid(self, gcref): + def get_rpy_type_index(self, gcref): from pypy.rlib.rarithmetic import intmask typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) - return intmask(typeid) + return self.get_member_index(typeid) def is_rpy_instance(self, gcref): typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Tue Sep 7 11:33:11 2010 @@ -401,10 +401,10 @@ [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.get_rpy_typeid_ptr = getfn(GCClass.get_rpy_typeid.im_func, - [s_gc, s_gcref], - annmodel.SomeInteger(), - minimal_transform=False) + self.get_rpy_type_index_ptr = getfn(GCClass.get_rpy_type_index.im_func, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) self.is_rpy_instance_ptr = getfn(GCClass.is_rpy_instance.im_func, [s_gc, s_gcref], annmodel.SomeBool(), @@ -928,11 +928,11 @@ 
resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) - def gct_gc_get_rpy_typeid(self, hop): + def gct_gc_get_rpy_type_index(self, hop): livevars = self.push_roots(hop) [v_ptr] = hop.spaceop.args hop.genop("direct_call", - [self.get_rpy_typeid_ptr, self.c_const_gc, v_ptr], + [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr], resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Tue Sep 7 11:33:11 2010 @@ -1034,7 +1034,7 @@ def test_get_rpy_memory_usage(self): self.run("get_rpy_memory_usage") - def define_get_rpy_typeid(self): + def define_get_rpy_type_index(self): U = lltype.GcStruct('U', ('x', lltype.Signed)) S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) A = lltype.GcArray(lltype.Ptr(S)) @@ -1045,13 +1045,13 @@ a = lltype.malloc(A, 1000) s2 = lltype.malloc(S) gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) - int1 = rgc.get_rpy_typeid(gcref1) + int1 = rgc.get_rpy_type_index(gcref1) gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) - int2 = rgc.get_rpy_typeid(gcref2) + int2 = rgc.get_rpy_type_index(gcref2) gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) - int3 = rgc.get_rpy_typeid(gcref3) + int3 = rgc.get_rpy_type_index(gcref3) gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2) - int4 = rgc.get_rpy_typeid(gcref4) + int4 = rgc.get_rpy_type_index(gcref4) assert int1 != int2 assert int1 != int3 assert int2 != int3 @@ -1060,8 +1060,8 @@ return fn - def test_get_rpy_typeid(self): - self.run("get_rpy_typeid") + def test_get_rpy_type_index(self): + self.run("get_rpy_type_index") class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): From arigo at codespeak.net Tue Sep 7 11:39:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 11:39:13 +0200 (CEST) Subject: [pypy-svn] r76911 - in pypy/branch/gc-module/pypy/module/gc: . test Message-ID: <20100907093913.C53CC282B90@codespeak.net> Author: arigo Date: Tue Sep 7 11:39:12 2010 New Revision: 76911 Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Log: Expose get_rpy_type_index() to app-level. Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Tue Sep 7 11:39:12 2010 @@ -21,6 +21,7 @@ 'get_rpy_roots': 'referents.get_rpy_roots', 'get_rpy_referents': 'referents.get_rpy_referents', 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', + 'get_rpy_type_index': 'referents.get_rpy_type_index', 'get_objects': 'referents.get_objects', 'get_referents': 'referents.get_referents', 'GcRef': 'referents.W_GcRef', Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Tue Sep 7 11:39:12 2010 @@ -50,6 +50,14 @@ size = rgc.get_rpy_memory_usage(gcref) return space.wrap(size) +def get_rpy_type_index(space, w_obj): + """Return an integer identifying the RPython type of the given + object or GcRef. 
The number starts at 1; it is an index in the + file typeids.txt produced at translation.""" + gcref = unwrap(space, w_obj) + index = rgc.get_rpy_type_index(gcref) + return space.wrap(index) + def _list_w_obj_referents(gcref, result_w): # Get all W_Root reachable directly from gcref, and add them to # the list 'result_w'. The logic here is not robust against gc Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Tue Sep 7 11:39:12 2010 @@ -62,6 +62,20 @@ print n assert 4 <= n <= 64 + def test_get_rpy_type_index(self): + import gc + class Foo(object): + pass + n1 = gc.get_rpy_type_index(12345) + n2 = gc.get_rpy_type_index(23456) + n3 = gc.get_rpy_type_index(1.2) + n4 = gc.get_rpy_type_index(Foo()) + print n1, n2, n3, n4 + assert n1 == n2 + assert n1 != n3 + assert n1 != n4 + assert n3 != n4 + def test_get_referents(self): import gc y = 12345 From antocuni at codespeak.net Tue Sep 7 12:51:45 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 7 Sep 2010 12:51:45 +0200 (CEST) Subject: [pypy-svn] r76912 - in pypy/build/bot2/pypybuildbot: . test Message-ID: <20100907105145.F2948282B90@codespeak.net> Author: antocuni Date: Tue Sep 7 12:51:44 2010 New Revision: 76912 Modified: pypy/build/bot2/pypybuildbot/builds.py pypy/build/bot2/pypybuildbot/summary.py pypy/build/bot2/pypybuildbot/test/test_builds.py pypy/build/bot2/pypybuildbot/test/test_summary.py Log: - rename TestRunnerCmd into PytestCmd - make it working properly for steps which run more than one py.test (e.g., the Translated one) - write tests :-) Modified: pypy/build/bot2/pypybuildbot/builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/builds.py (original) +++ pypy/build/bot2/pypybuildbot/builds.py Tue Sep 7 12:51:44 2010 @@ -55,10 +55,12 @@ #self.command = ['cp', '/tmp/pypy-c', '.'] -class TestRunnerCmd(ShellCmd): +class PytestCmd(ShellCmd): def commandComplete(self, cmd): from pypybuildbot.summary import RevisionOutcomeSet + if 'pytestLog' not in cmd.logs: + return pytestLog = cmd.logs['pytestLog'] outcome = RevisionOutcomeSet(None) outcome.populate(pytestLog) @@ -78,7 +80,11 @@ except KeyError: return else: - builder.summary_by_branch_and_revision[(branch, rev)] = summary + d = builder.summary_by_branch_and_revision + key = (branch, rev) + if key in d: + summary += d[key] + d[key] = summary builder.saveYourself() # ________________________________________________________________ @@ -107,7 +113,7 @@ setup_steps(platform, self) - self.addStep(TestRunnerCmd( + self.addStep(PytestCmd( description="pytest", command=["python", "testrunner/runner.py", "--logfile=testrun.log", @@ -137,7 +143,7 @@ if app_tests: if app_tests == True: app_tests = [] - self.addStep(TestRunnerCmd( + self.addStep(PytestCmd( description="app-level (-A) test", command=["python", "testrunner/runner.py", "--logfile=pytest-A.log", @@ -149,7 +155,7 @@ env={"PYTHONPATH": ['.']})) if lib_python: - self.addStep(ShellCmd( + self.addStep(PytestCmd( description="lib-python test", command=["python", "pypy/test_all.py", "--pypy=pypy/translator/goal/pypy-c", @@ -158,7 +164,7 @@ if pypyjit: # upload nightly build, if we're running jit tests - self.addStep(ShellCmd( + self.addStep(PytestCmd( description="pypyjit tests", command=["python", "pypy/test_all.py", 
"--pypy=pypy/translator/goal/pypy-c", Modified: pypy/build/bot2/pypybuildbot/summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/summary.py (original) +++ pypy/build/bot2/pypybuildbot/summary.py Tue Sep 7 12:51:44 2010 @@ -33,9 +33,18 @@ def is_ok(self): return self.F == 0 + def to_tuple(self): + return self.p, self.F, self.s, self.x + def __str__(self): return '%d, %d F, %d s, %d x' % (self.p, self.F, self.s, self.x) + def __add__(self, other): + return self.__class__(self.p + other.p, + self.F + other.F, + self.s + other.s, + self.x + other.x) + class RevisionOutcomeSet(object): def __init__(self, rev, key=None, run_info=None): Modified: pypy/build/bot2/pypybuildbot/test/test_builds.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_builds.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_builds.py Tue Sep 7 12:51:44 2010 @@ -1,4 +1,5 @@ import py +from cStringIO import StringIO from pypybuildbot import builds class FakeProperties(object): @@ -60,3 +61,73 @@ rebuilt.start() assert pth.join('mstr').check(dir=True) assert rebuilt.masterdest == str(pth.join('mstr', 'trunk', 'base-123')) + +class TestPytestCmd(object): + + class Fake(object): + def __init__(self, **kwds): + self.__dict__.update(kwds) + + class FakeBuildStatus(Fake): + def getProperties(self): + return self.properties + + class FakeBuilder(Fake): + def saveYourself(self): + pass + + def _create(self, log, rev, branch): + if isinstance(log, str): + log = StringIO(log) + step = builds.PytestCmd() + step.build = self.Fake() + step.build.build_status = self.FakeBuildStatus(properties={'got_revision': rev, + 'branch': branch}) + step.build.build_status.builder = builder = self.FakeBuilder() + cmd = self.Fake(logs={'pytestLog': log}) + return step, cmd, builder + + def test_no_log(self): + step = builds.PytestCmd() + cmd = self.Fake(logs={}) + assert step.commandComplete(cmd) is None + + def test_empty_log(self): + step, cmd, builder = self._create(log='', rev='123', branch='trunk') + step.commandComplete(cmd) + summary = builder.summary_by_branch_and_revision[('trunk', '123')] + assert summary.to_tuple() == (0, 0, 0, 0) + + def test_summary(self): + log = """F a/b.py:test_one +. a/b.py:test_two +s a/b.py:test_three +S a/c.py:test_four +""" + step, cmd, builder = self._create(log=log, rev='123', branch='trunk') + step.commandComplete(cmd) + summary = builder.summary_by_branch_and_revision[('trunk', '123')] + assert summary.to_tuple() == (1, 1, 2, 0) + + def test_branch_is_None(self): + step, cmd, builder = self._create(log='', rev='123', branch=None) + step.commandComplete(cmd) + assert ('trunk', '123') in builder.summary_by_branch_and_revision + + def test_trailing_slash(self): + step, cmd, builder = self._create(log='', rev='123', branch='branch/foo/') + step.commandComplete(cmd) + assert ('branch/foo', '123') in builder.summary_by_branch_and_revision + + def test_multiple_logs(self): + log = """F a/b.py:test_one +. 
a/b.py:test_two +s a/b.py:test_three +S a/c.py:test_four +""" + step, cmd, builder = self._create(log=log, rev='123', branch='trunk') + step.commandComplete(cmd) + cmd.logs['pytestLog'] = StringIO(log) # "reopen" the file + step.commandComplete(cmd) + summary = builder.summary_by_branch_and_revision[('trunk', '123')] + assert summary.to_tuple() == (2, 2, 4, 0) Modified: pypy/build/bot2/pypybuildbot/test/test_summary.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_summary.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_summary.py Tue Sep 7 12:51:44 2010 @@ -9,6 +9,13 @@ class TestOutcomes(object): + def test_OutcomeSummary(self): + s = summary.OutcomeSummary(1, 2, 3, 4) + assert s.to_tuple() == (1, 2, 3, 4) + assert str(s) == '1, 2 F, 3 s, 4 x' + s2 = s+s + assert s2.to_tuple() == (2, 4, 6, 8) + def test_populate(self): rev_outcome_set = summary.RevisionOutcomeSet(50000, ('foo', 40)) From arigo at codespeak.net Tue Sep 7 13:35:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 13:35:15 +0200 (CEST) Subject: [pypy-svn] r76913 - pypy/branch/gc-module/pypy/rlib Message-ID: <20100907113515.5A892282B90@codespeak.net> Author: arigo Date: Tue Sep 7 13:35:13 2010 New Revision: 76913 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py Log: Fix. Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Tue Sep 7 13:35:13 2010 @@ -364,7 +364,7 @@ def cast_gcref_to_int(gcref): if we_are_translated(): - return cast_ptr_to_int(gcref) + return lltype.cast_ptr_to_int(gcref) else: return id(gcref._x) From arigo at codespeak.net Tue Sep 7 14:02:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 14:02:18 +0200 (CEST) Subject: [pypy-svn] r76914 - pypy/trunk/pypy/module/__builtin__/test Message-ID: <20100907120218.3B911282B90@codespeak.net> Author: arigo Date: Tue Sep 7 14:02:16 2010 New Revision: 76914 Modified: pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Log: Write a lengthy test about old-style classes that look up special method names via the __getattr__() method too, which we don't do so far in PyPy. 
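The behaviour being pinned down here is specific to old-style (classic) classes on CPython 2: special method names such as __len__ or __setitem__ are looked up on the instance, so a __getattr__ hook ends up supplying them; PyPy did not yet do this, which is what the new test documents. A condensed, runnable Python 2 illustration of what the test below checks (class B is taken from the test itself):

    class B:                   # old-style class on purpose
        def __getattr__(self, attr):
            def callable(*args):
                self.called = (attr, args)
                return 42
            return callable

    b = B()
    assert len(b) == 42                        # __len__ found via __getattr__
    assert b.called == ('__len__', ())
    b[6] = 7                                   # __setitem__ found the same way
    assert b.called == ('__setitem__', (6, 7))

New-style classes behave differently: there the implicit special-method lookup bypasses the instance entirely, so none of this applies to them.
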
Modified: pypy/trunk/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Tue Sep 7 14:02:16 2010 @@ -767,6 +767,159 @@ finally: warnings.simplefilter('default', RuntimeWarning) + def test_special_method_via_getattr(self): + class A: + def __getattr__(self, attr): + print 'A getattr:', attr + def callable(*args): + print 'A called:', attr + repr(args) + return attr + repr(args) + return callable + class B: + def __getattr__(self, attr): + print 'B getattr:', attr + def callable(*args): + print 'B called:', attr, args + self.called = attr, args + if attr == '__coerce__': + return self, args[0] + return 42 + return callable + a = A() + a.instancevalue = 42 + assert a.instancevalue == 42 + A.classvalue = 123 + assert a.classvalue == 123 + assert a.foobar(5) == 'foobar(5,)' + assert a.__dict__ == {'instancevalue': 42} + assert a.__class__ is A + # this follows the Python 2.5 rules, more precisely + assert repr(a) == '__repr__()' + assert str(a) == '__str__()' + assert unicode(a) == u'__unicode__()' + b = B() + assert 'called' not in b.__dict__ # and not e.g. ('__init__', ()) + assert len(b) == 42 + assert b.called == ('__len__', ()) + assert a[5] == '__getitem__(5,)' + b[6] = 7 + assert b.called == ('__setitem__', (6, 7)) + del b[8] + assert b.called == ('__delitem__', (8,)) + # + class C: + def __getattr__(self, name): + if name == '__iter__': + return lambda: iter([3, 33, 333]) + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + class C: + def __getattr__(self, name): + if name == '__getitem__': + return lambda n: [3, 33, 333][n] + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + assert a[:6] == '__getslice__(0, 6)' + b[3:5] = 7 + assert b.called == ('__setslice__', (3, 5, 7)) + del b[:-1000] + assert b.called == ('__delslice__', (0, -958)) # adds len(b)... + assert a(5) == '__call__(5,)' + raises(TypeError, bool, a) # "should return an int" + assert not not b + # + class C: + def __getattr__(self, name): + if name == '__nonzero__': + return lambda: False + raise AttributeError + assert not C() + # + class C: + def __getattr__(self, name): + if name == '__len__': + return lambda: 0 + raise AttributeError + assert not C() + # + #assert cmp(b, 43) == 0 # because __eq__(43) returns 42, so True... + # ... 
I will leave this case as XXX implement me + assert hash(b) == 42 + assert range(100, 200)[b] == 142 + assert "foo" in b + # + class C: + def __iter__(self): + return self + def __getattr__(self, name): + if name == 'next': + return lambda: 'the next item' + raise AttributeError + for x in C(): + assert x == 'the next item' + break + # + # XXX a really corner case: '__del__' + # + import operator + op_by_name = {"neg": operator.neg, + "pos": operator.pos, + "abs": abs, + "invert": operator.invert, + "int": int, + "long": long} + for opname, opfunc in op_by_name.items(): + assert opfunc(b) == 42 + assert b.called == ("__" + opname + "__", ()) + assert oct(a) == '__oct__()' + assert hex(a) == '__hex__()' + # + class C: + def __getattr__(self, name): + return lambda: 5.5 + raises(TypeError, float, b) + assert float(C()) == 5.5 + # + op_by_name = {'eq': operator.eq, + 'ne': operator.ne, + 'gt': operator.gt, + 'lt': operator.lt, + 'ge': operator.ge, + 'le': operator.le, + 'imod': operator.imod, + 'iand': operator.iand, + 'ipow': operator.ipow, + 'itruediv': operator.itruediv, + 'ilshift': operator.ilshift, + 'ixor': operator.ixor, + 'irshift': operator.irshift, + 'ifloordiv': operator.ifloordiv, + 'idiv': operator.idiv, + 'isub': operator.isub, + 'imul': operator.imul, + 'iadd': operator.iadd, + 'ior': operator.ior, + 'or': operator.or_, + 'and': operator.and_, + 'xor': operator.xor, + 'lshift': operator.lshift, + 'rshift': operator.rshift, + 'add': operator.add, + 'sub': operator.sub, + 'mul': operator.mul, + 'div': operator.div, + 'mod': operator.mod, + 'divmod': divmod, + 'floordiv': operator.floordiv, + 'truediv': operator.truediv} + for opname, opfunc in op_by_name.items(): + assert opfunc(b, 5) == 42 + assert b.called == ("__" + opname + "__", (5,)) + assert coerce(b, 5) == (b, 5) + + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withsharingdict": True}) From arigo at codespeak.net Tue Sep 7 14:21:45 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 14:21:45 +0200 (CEST) Subject: [pypy-svn] r76915 - pypy/trunk/pypy/module/__builtin__/test Message-ID: <20100907122145.BCB51282B90@codespeak.net> Author: arigo Date: Tue Sep 7 14:21:43 2010 New Revision: 76915 Modified: pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Log: Improve the test. Modified: pypy/trunk/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Tue Sep 7 14:21:43 2010 @@ -786,18 +786,25 @@ return 42 return callable a = A() - a.instancevalue = 42 + a.instancevalue = 42 # does not go via __getattr__('__setattr__') + a.__getattr__ = "hi there, ignore me, I'm in a" + a.__setattr__ = "hi there, ignore me, I'm in a too" assert a.instancevalue == 42 A.classvalue = 123 assert a.classvalue == 123 assert a.foobar(5) == 'foobar(5,)' - assert a.__dict__ == {'instancevalue': 42} + assert a.__dict__ == {'instancevalue': 42, + '__getattr__': a.__getattr__, + '__setattr__': a.__setattr__} assert a.__class__ is A - # this follows the Python 2.5 rules, more precisely + # This follows the Python 2.5 rules, more precisely. + # It is still valid in Python 2.7 too. 
assert repr(a) == '__repr__()' assert str(a) == '__str__()' assert unicode(a) == u'__unicode__()' b = B() + b.__getattr__ = "hi there, ignore me, I'm in b" + b.__setattr__ = "hi there, ignore me, I'm in b too" assert 'called' not in b.__dict__ # and not e.g. ('__init__', ()) assert len(b) == 42 assert b.called == ('__len__', ()) From antocuni at codespeak.net Tue Sep 7 14:24:34 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 7 Sep 2010 14:24:34 +0200 (CEST) Subject: [pypy-svn] r76916 - in pypy/build/bot2/pypybuildbot: . test Message-ID: <20100907122434.14A23282B90@codespeak.net> Author: antocuni Date: Tue Sep 7 14:24:33 2010 New Revision: 76916 Modified: pypy/build/bot2/pypybuildbot/master.py pypy/build/bot2/pypybuildbot/test/test_pypylist.py Log: rename the builder pypy-c-app-level-win-32 to pypy-c-app-level-win-x86-32 in order to have a consistent naming scheme (in particular with pypy-c-jit-win-x86-32). Moreover, add a test for PyPyTarball to check that the returned builder names actually exist. Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Tue Sep 7 14:24:33 2010 @@ -159,11 +159,12 @@ LINUX32 = "own-linux-x86-32" LINUX64 = "own-linux-x86-64" MACOSX32 = "own-macosx-x86-32" +#WIN32 = "own-win-x86-32" APPLVLLINUX32 = "pypy-c-app-level-linux-x86-32" APPLVLLINUX64 = "pypy-c-app-level-linux-64" STACKLESSAPPLVLLINUX32 = "pypy-c-stackless-app-level-linux-x86-32" -APPLVLWIN32 = "pypy-c-app-level-win-32" +APPLVLWIN32 = "pypy-c-app-level-win-x86-32" STACKLESSAPPLVLFREEBSD64 = 'pypy-c-stackless-app-level-freebsd-7-x86-64' JITLINUX32 = "pypy-c-jit-linux-x86-32" Modified: pypy/build/bot2/pypybuildbot/test/test_pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_pypylist.py Tue Sep 7 14:24:33 2010 @@ -1,3 +1,4 @@ +import py from pypybuildbot.pypylist import PyPyTarball def test_pypytarball(): @@ -41,27 +42,52 @@ 'pypy-c-stackless-10000-linux.tar.bz2', ] +def load_BuildmasterConfig(): + import os + from pypybuildbot import summary, builds + def load(name): + if name == 'pypybuildbot.summary': + return summary + elif name == 'pypybuildbot.builds': + return builds + else: + assert False + + this = py.path.local(__file__) + master_py = this.dirpath().dirpath().join('master.py') + glob = {'httpPortNumber': 80, + 'slavePortnum': 1234, + 'passwords': {}, + 'load': load, + 'os': os} + execfile(str(master_py), glob) + return glob['BuildmasterConfig'] + def test_builder_names(): + BuildmasterConfig = load_BuildmasterConfig() + builders = [b['name'] for b in BuildmasterConfig['builders']] + known_exceptions = set(['own-win-x86-32']) + def check_builder_names(t, expected_own, expected_app): + own, app = t.get_builder_names() + assert own == expected_own + assert app == expected_app + assert own in builders or own in known_exceptions + assert app in builders or app in known_exceptions + t = PyPyTarball('pypy-c-jit-76867-linux.tar.bz2') - assert t.get_builder_names() == ('own-linux-x86-32', - 'pypy-c-jit-linux-x86-32') + check_builder_names(t, 'own-linux-x86-32', 'pypy-c-jit-linux-x86-32') t = PyPyTarball('pypy-c-nojit-76867-linux.tar.bz2') - assert t.get_builder_names() == ('own-linux-x86-32', - 'pypy-c-app-level-linux-x86-32') + check_builder_names(t, 
'own-linux-x86-32', 'pypy-c-app-level-linux-x86-32') t = PyPyTarball('pypy-c-stackless-76867-linux.tar.bz2') - assert t.get_builder_names() == ('own-linux-x86-32', - 'pypy-c-stackless-app-level-linux-x86-32') + check_builder_names(t, 'own-linux-x86-32', 'pypy-c-stackless-app-level-linux-x86-32') t = PyPyTarball('pypy-c-jit-76867-osx.tar.bz2') - assert t.get_builder_names() == ('own-macosx-x86-32', - 'pypy-c-jit-macosx-x86-32') + check_builder_names(t, 'own-macosx-x86-32', 'pypy-c-jit-macosx-x86-32') t = PyPyTarball('pypy-c-jit-76867-linux64.tar.bz2') - assert t.get_builder_names() == ('own-linux-x86-64', - 'pypy-c-jit-linux-x86-64') + check_builder_names(t, 'own-linux-x86-64', 'pypy-c-jit-linux-x86-64') t = PyPyTarball('pypy-c-jit-76867-win32.tar.bz2') - assert t.get_builder_names() == ('own-win-x86-32', - 'pypy-c-jit-win-x86-32') + check_builder_names(t, 'own-win-x86-32', 'pypy-c-jit-win-x86-32') From antocuni at codespeak.net Tue Sep 7 14:31:41 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 7 Sep 2010 14:31:41 +0200 (CEST) Subject: [pypy-svn] r76917 - in pypy/build/bot2/pypybuildbot: . test Message-ID: <20100907123141.C7C15282B90@codespeak.net> Author: antocuni Date: Tue Sep 7 14:31:39 2010 New Revision: 76917 Modified: pypy/build/bot2/pypybuildbot/master.py pypy/build/bot2/pypybuildbot/test/test_pypylist.py Log: argh, rename also this builder Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Tue Sep 7 14:31:39 2010 @@ -161,7 +161,7 @@ MACOSX32 = "own-macosx-x86-32" #WIN32 = "own-win-x86-32" APPLVLLINUX32 = "pypy-c-app-level-linux-x86-32" -APPLVLLINUX64 = "pypy-c-app-level-linux-64" +APPLVLLINUX64 = "pypy-c-app-level-linux-x86-64" STACKLESSAPPLVLLINUX32 = "pypy-c-stackless-app-level-linux-x86-32" APPLVLWIN32 = "pypy-c-app-level-win-x86-32" Modified: pypy/build/bot2/pypybuildbot/test/test_pypylist.py ============================================================================== --- pypy/build/bot2/pypybuildbot/test/test_pypylist.py (original) +++ pypy/build/bot2/pypybuildbot/test/test_pypylist.py Tue Sep 7 14:31:39 2010 @@ -91,3 +91,6 @@ t = PyPyTarball('pypy-c-jit-76867-win32.tar.bz2') check_builder_names(t, 'own-win-x86-32', 'pypy-c-jit-win-x86-32') + + t = PyPyTarball('pypy-c-nojit-76867-linux64.tar.bz2') + check_builder_names(t, 'own-linux-x86-64', 'pypy-c-app-level-linux-x86-64') From arigo at codespeak.net Tue Sep 7 16:26:41 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 16:26:41 +0200 (CEST) Subject: [pypy-svn] r76919 - pypy/branch/gc-module/pypy/rlib Message-ID: <20100907142641.A5F89282B90@codespeak.net> Author: arigo Date: Tue Sep 7 16:26:39 2010 New Revision: 76919 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py Log: Ignore the issue that we can get occasionally some strange objects, that look like instances but whose typeptr is actually NULL. 
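A self-contained pure-Python analogue of the guard added below may make the point clearer; the class and function names here are illustrative only and are not part of the rgc API. The idea is simply: never ask "is this an instance of Class?" before checking that the object's typeptr is non-NULL, because a few internal objects (such as rdict's dummy key object) look like instances but carry a NULL typeptr.

    # illustrative sketch, not PyPy code
    class FakeLLObject(object):
        def __init__(self, typeptr, python_obj=None):
            self.typeptr = typeptr        # 0 plays the role of a NULL type pointer
            self.python_obj = python_obj

    def try_cast_to_instance(Class, obj):
        if not obj.typeptr:               # NULL typeptr: not a real RPython instance
            return None                   # (e.g. rdict's dummy key object)
        if isinstance(obj.python_obj, Class):
            return obj.python_obj
        return None

    class Foo(object):
        pass

    assert try_cast_to_instance(Foo, FakeLLObject(0)) is None
    assert isinstance(try_cast_to_instance(Foo, FakeLLObject(1, Foo())), Foo)
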
Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Tue Sep 7 16:26:39 2010 @@ -411,9 +411,10 @@ from pypy.rpython.lltypesystem import rclass if _is_rpy_instance(gcref): objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref) - clsptr = _get_llcls_from_cls(Class) - if rclass.ll_isinstance(objptr, clsptr): - return cast_base_ptr_to_instance(Class, objptr) + if objptr.typeptr: # may be NULL, e.g. in rdict's dummykeyobj + clsptr = _get_llcls_from_cls(Class) + if rclass.ll_isinstance(objptr, clsptr): + return cast_base_ptr_to_instance(Class, objptr) return None else: if isinstance(gcref._x, Class): From arigo at codespeak.net Tue Sep 7 16:47:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 16:47:19 +0200 (CEST) Subject: [pypy-svn] r76920 - in pypy/trunk/pypy: annotation annotation/test rlib Message-ID: <20100907144719.7692A282B90@codespeak.net> Author: arigo Date: Tue Sep 7 16:47:17 2010 New Revision: 76920 Modified: pypy/trunk/pypy/annotation/bookkeeper.py pypy/trunk/pypy/annotation/builtin.py pypy/trunk/pypy/annotation/classdef.py pypy/trunk/pypy/annotation/test/test_annrpython.py pypy/trunk/pypy/rlib/rarithmetic.py Log: Change annotation to say that int(r_uint(x)) is invalid: you have to call intmask(). Fix one place that use it in rlib. Also fix an issue where unexpected constants of type "long", but actually that fit an "int", show up during translation (e.g. constants from .pyc files generated under 32-bit and loaded under 64-bit). Modified: pypy/trunk/pypy/annotation/bookkeeper.py ============================================================================== --- pypy/trunk/pypy/annotation/bookkeeper.py (original) +++ pypy/trunk/pypy/annotation/bookkeeper.py Tue Sep 7 16:47:17 2010 @@ -338,8 +338,12 @@ result = SomeBool() elif tp is int: result = SomeInteger(nonneg = x>=0) - elif tp is long and 0 <= x <= (sys.maxint * 2 + 1): - result = SomeInteger(unsigned = True) + elif tp is long: + if -sys.maxint-1 <= x <= sys.maxint: + x = int(x) + result = SomeInteger(nonneg = x>=0) + else: + raise Exception("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses if len(x) == 1: result = SomeChar() Modified: pypy/trunk/pypy/annotation/builtin.py ============================================================================== --- pypy/trunk/pypy/annotation/builtin.py (original) +++ pypy/trunk/pypy/annotation/builtin.py Tue Sep 7 16:47:17 2010 @@ -92,6 +92,8 @@ return s_obj.is_true() def builtin_int(s_obj, s_base=None): + if isinstance(s_obj, SomeInteger): + assert not s_obj.unsigned, "instead of int(r_uint(x)), use intmask(r_uint(x))" assert (s_base is None or isinstance(s_base, SomeInteger) and s_obj.knowntype == str), "only int(v|string) or int(string,int) expected" if s_base is not None: Modified: pypy/trunk/pypy/annotation/classdef.py ============================================================================== --- pypy/trunk/pypy/annotation/classdef.py (original) +++ pypy/trunk/pypy/annotation/classdef.py Tue Sep 7 16:47:17 2010 @@ -276,6 +276,8 @@ # create the Attribute and do the generalization asked for newattr = Attribute(attr, self.bookkeeper) if s_value: + if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): + import pdb; pdb.set_trace() newattr.s_value = s_value # keep all subattributes' values Modified: 
pypy/trunk/pypy/annotation/test/test_annrpython.py ============================================================================== --- pypy/trunk/pypy/annotation/test/test_annrpython.py (original) +++ pypy/trunk/pypy/annotation/test/test_annrpython.py Tue Sep 7 16:47:17 2010 @@ -767,7 +767,6 @@ assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError) # KeyError ignored because l is a list def test_overrides(self): - import sys excs = [] def record_exc(e): """NOT_RPYTHON""" @@ -869,8 +868,27 @@ def f(): return large_constant a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, []) + # if you want to get a r_uint, you have to be explicit about it + + def test_prebuilt_long_that_is_not_too_long(self): + small_constant = 12L + def f(): + return small_constant + a = self.RPythonAnnotator() s = a.build_types(f, []) - assert s.knowntype == r_uint + assert s.const == 12 + assert s.nonneg + assert not s.unsigned + # + small_constant = -23L + def f(): + return small_constant + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == -23 + assert not s.nonneg + assert not s.unsigned def test_pbc_getattr(self): class C: @@ -1386,7 +1404,6 @@ assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) def test_sys_attrs(self): - import sys def f(): return sys.argv[0] a = self.RPythonAnnotator() Modified: pypy/trunk/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/trunk/pypy/rlib/rarithmetic.py (original) +++ pypy/trunk/pypy/rlib/rarithmetic.py Tue Sep 7 16:47:17 2010 @@ -74,7 +74,7 @@ def widen(n): from pypy.rpython.lltypesystem import lltype if _should_widen_type(lltype.typeOf(n)): - return int(n) + return intmask(n) else: return n widen._annspecialcase_ = 'specialize:argtype(0)' From arigo at codespeak.net Tue Sep 7 17:04:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 17:04:33 +0200 (CEST) Subject: [pypy-svn] r76921 - pypy/trunk/pypy/rpython/module Message-ID: <20100907150433.DCA50282BD6@codespeak.net> Author: arigo Date: Tue Sep 7 17:04:32 2010 New Revision: 76921 Modified: pypy/trunk/pypy/rpython/module/ll_time.py Log: Fix. 
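The one-line fix below follows up on r76920: since int(r_uint(x)) is no longer accepted by the annotator, the ftime()-based clock reads its unsigned C fields through intmask() instead. Roughly speaking, intmask() reinterprets an unsigned machine word as a signed one; the following simplified, pure-Python sketch (not the real pypy.rlib.rarithmetic code) shows the intended behaviour:

    import sys

    def intmask_sketch(n):
        # mask the value to the machine word width, then fold it back
        # into the signed range [-sys.maxint - 1, sys.maxint]
        n = n & (2 * sys.maxint + 1)
        if n > sys.maxint:
            n -= 2 * (sys.maxint + 1)
        return n

    # e.g. on a 32-bit build: intmask_sketch(0xFFFFFFFF) == -1
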
Modified: pypy/trunk/pypy/rpython/module/ll_time.py ============================================================================== --- pypy/trunk/pypy/rpython/module/ll_time.py (original) +++ pypy/trunk/pypy/rpython/module/ll_time.py Tue Sep 7 17:04:32 2010 @@ -9,6 +9,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.extfunc import BaseLazyRegistering, registering, extdef from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.translator.tool.cbuild import ExternalCompilationInfo if sys.platform == 'win32': @@ -119,7 +120,8 @@ if self.HAVE_FTIME: t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) - result = float(int(t.c_time)) + float(int(t.c_millitm)) * 0.001 + result = (float(intmask(t.c_time)) + + float(intmask(t.c_millitm)) * 0.001) lltype.free(t, flavor='raw') return result return float(c_time(void)) From fijal at codespeak.net Tue Sep 7 18:15:56 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Tue, 7 Sep 2010 18:15:56 +0200 (CEST) Subject: [pypy-svn] r76923 - in pypy/trunk/pypy: module/_socket/test rlib Message-ID: <20100907161556.8E864282BD6@codespeak.net> Author: fijal Date: Tue Sep 7 18:15:54 2010 New Revision: 76923 Modified: pypy/trunk/pypy/module/_socket/test/test_sock_app.py pypy/trunk/pypy/rlib/rsocket.py Log: A test for untested piece of code (initializing raw addresses). Also kill a sanity check that was in wrong place at the very least. Reintroduce if needed, but with tests please Modified: pypy/trunk/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/trunk/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/trunk/pypy/module/_socket/test/test_sock_app.py Tue Sep 7 18:15:54 2010 @@ -221,6 +221,21 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info +def test_unknown_addr_as_object(): + from pypy.rlib import rsocket + from pypy.rpython.lltypesystem import lltype, rffi + + c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') + c_addr.c_sa_data[0] = 'c' + rffi.setintfield(c_addr, 'c_sa_family', 15) + # XXX what size to pass here? for the purpose of this test it has + # to be short enough so we have some data, 1 sounds good enough + # + sizeof USHORT + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + assert space.is_true(space.isinstance(w_obj, space.w_tuple)) + assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 + assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' + def test_getnameinfo(): host = "127.0.0.1" port = 25 @@ -440,7 +455,6 @@ s2 = s.dup() assert s.fileno() != s2.fileno() assert s.getsockname() == s2.getsockname() - def test_buffer_or_unicode(self): # Test that send/sendall/sendto accept a buffer or a unicode as arg Modified: pypy/trunk/pypy/rlib/rsocket.py ============================================================================== --- pypy/trunk/pypy/rlib/rsocket.py (original) +++ pypy/trunk/pypy/rlib/rsocket.py Tue Sep 7 18:15:54 2010 @@ -100,8 +100,6 @@ def lock(self, TYPE=_c.sockaddr): """Return self.addr_p, cast as a pointer to TYPE. Must call unlock()! 
""" - if not (self.minlen <= self.addrlen <= self.maxlen): - raise RSocketError("invalid address") return rffi.cast(lltype.Ptr(TYPE), self.addr_p) lock._annspecialcase_ = 'specialize:ll' From arigo at codespeak.net Tue Sep 7 18:16:16 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 18:16:16 +0200 (CEST) Subject: [pypy-svn] r76924 - in pypy/trunk/pypy/module/__builtin__: . test Message-ID: <20100907161616.0730B282BD6@codespeak.net> Author: arigo Date: Tue Sep 7 18:16:15 2010 New Revision: 76924 Modified: pypy/trunk/pypy/module/__builtin__/interp_classobj.py pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Log: Fix for the test. Modified: pypy/trunk/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/trunk/pypy/module/__builtin__/interp_classobj.py Tue Sep 7 18:16:15 2010 @@ -201,7 +201,7 @@ w_inst = W_InstanceObjectWithDel(space, self) else: w_inst = W_InstanceObject(space, self) - w_init = w_inst.getattr(space, space.wrap('__init__'), False) + w_init = w_inst.getattr_from_class(space, space.wrap('__init__')) if w_init is not None: w_result = space.call_args(w_init, __args__) if not space.is_w(w_result, space.w_None): @@ -337,25 +337,44 @@ space.wrap("__class__ must be set to a class")) self.w_class = w_class - - def getattr(self, space, w_name, exc=True): - w_result = space.finditem(self.w_dict, w_name) - if w_result is not None: - return w_result + def getattr_from_class(self, space, w_name): + # Look up w_name in the class dict, and call its __get__. + # This method ignores the instance dict and the __getattr__. + # Returns None if not found. w_value = self.w_class.lookup(space, w_name) if w_value is None: - if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, space.str_w(w_name)) - else: - return None + return None w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: return w_value return space.call_function(w_descr_get, w_value, self, self.w_class) + def getattr(self, space, w_name, exc=True): + # Normal getattr rules: look up w_name in the instance dict, + # in the class dict, and then via a call to __getatttr__. 
+ w_result = space.finditem(self.w_dict, w_name) + if w_result is not None: + return w_result + w_result = self.getattr_from_class(space, w_name) + if w_result is not None: + return w_result + w_meth = self.getattr_from_class(space, space.wrap('__getattr__')) + if w_meth is not None: + try: + return space.call_function(w_meth, w_name) + except OperationError, e: + if not exc and e.match(space, space.w_AttributeError): + return None # eat the AttributeError + raise + # not found at all + if exc: + raise operationerrfmt( + space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, space.str_w(w_name)) + else: + return None + def descr_getattribute(self, space, w_attr): name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': @@ -363,19 +382,11 @@ return self.w_dict elif name == "__class__": return self.w_class - try: - return self.getattr(space, w_attr) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - w_meth = self.getattr(space, space.wrap('__getattr__'), False) - if w_meth is not None: - return space.call_function(w_meth, w_attr) - raise + return self.getattr(space, w_attr) def descr_setattr(self, space, w_name, w_value): name = unwrap_attr(space, w_name) - w_meth = self.getattr(space, space.wrap('__setattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__setattr__')) if name and name[0] == "_": if name == '__dict__': self.setdict(space, w_value) @@ -405,7 +416,7 @@ # use setclass to raise the error self.setclass(space, None) return - w_meth = self.getattr(space, space.wrap('__delattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__delattr__')) if w_meth is not None: space.call_function(w_meth, w_name) else: @@ -658,7 +669,10 @@ def descr_del(self, space): # Note that this is called from executioncontext.UserDelAction # via the space.userdel() method. - w_func = self.getattr(space, space.wrap('__del__'), False) + w_name = space.wrap('__del__') + w_func = space.finditem(self.w_dict, w_name) + if w_func is None: + w_func = self.getattr_from_class(space, w_name) if w_func is not None: space.call_function(w_func) Modified: pypy/trunk/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/trunk/pypy/module/__builtin__/test/test_classobj.py Tue Sep 7 18:16:15 2010 @@ -924,7 +924,9 @@ for opname, opfunc in op_by_name.items(): assert opfunc(b, 5) == 42 assert b.called == ("__" + opname + "__", (5,)) - assert coerce(b, 5) == (b, 5) + x, y = coerce(b, 5) + assert x is b + assert y == 5 class AppTestOldStyleSharing(AppTestOldstyle): From arigo at codespeak.net Tue Sep 7 18:24:59 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 7 Sep 2010 18:24:59 +0200 (CEST) Subject: [pypy-svn] r76925 - pypy/branch/gc-module/pypy/module/gc/test Message-ID: <20100907162459.5E091282BE8@codespeak.net> Author: arigo Date: Tue Sep 7 18:24:57 2010 New Revision: 76925 Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Log: Fix these tests to also work on "pypy-c py.test -A". 
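"py.test -A" (runappdirect mode) runs the app-level tests on a previously built pypy-c instead of on the untranslated interpreter, so fixtures installed by setup_class at interpreter level (such as the fake GC roots patched in below) are not available there. The fix the diff applies is the usual pattern: expose the runappdirect flag to the app-level code and relax the fixture-dependent assertions in that case. A sketch of the pattern with illustrative names, not the actual test file:

    # illustrative sketch of the pattern (not the actual test file)
    from pypy.conftest import option

    class AppTestExample(object):
        def setup_class(cls):
            # make the host-level flag visible to the app-level test methods
            cls.w_runappdirect = cls.space.wrap(option.runappdirect)

        def test_something(self):
            if self.runappdirect:
                pass   # running on a real pypy-c: fixture-based checks do not apply
            else:
                # checks that rely on fixtures planted by setup_class go here
                pass
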
Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Tue Sep 7 18:24:57 2010 @@ -1,3 +1,4 @@ +from pypy.conftest import option class AppTestReferents(object): @@ -12,6 +13,7 @@ cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) rgc.get_rpy_roots = lambda: ( map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*17) + cls.w_runappdirect = cls.space.wrap(option.runappdirect) def teardown_class(cls): from pypy.rlib import rgc @@ -20,10 +22,15 @@ def test_get_objects(self): import gc lst = gc.get_objects() - assert 2 in lst - assert 4 in lst - assert 7 in lst - assert [2, 7] in lst + i4, l27, ro = self.ALL_ROOTS + i2, i7 = l27 + found = 0 + for x in lst: + if x is i4: found |= 1 + if x is i2: found |= 2 + if x is i7: found |= 4 + if x is l27: found |= 8 + assert found == 15 for x in lst: if type(x) is gc.GcRef: assert 0, "get_objects() returned a GcRef" @@ -31,10 +38,13 @@ def test_get_rpy_roots(self): import gc lst = gc.get_rpy_roots() - assert lst[0] == 4 - assert lst[1] == [2, 7] - assert type(lst[2]) is gc.GcRef - assert len(lst) == 3 + if self.runappdirect: + pass # unsure what to test + else: + assert lst[0] == 4 + assert lst[1] == [2, 7] + assert type(lst[2]) is gc.GcRef + assert len(lst) == 3 def test_get_rpy_referents(self): import gc From hakanardo at codespeak.net Tue Sep 7 19:22:37 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Tue, 7 Sep 2010 19:22:37 +0200 (CEST) Subject: [pypy-svn] r76926 - in pypy/branch/jit-bounds: . lib_pypy/_ctypes pypy/annotation pypy/annotation/test pypy/bin pypy/interpreter pypy/jit/backend/llsupport pypy/jit/codewriter pypy/jit/metainterp/optimizeopt pypy/module/__builtin__ pypy/module/__builtin__/test pypy/module/_socket/test pypy/module/array/benchmark pypy/module/array/test pypy/module/cpyext pypy/module/posix/test pypy/objspace/std pypy/rlib pypy/rpython pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/test pypy/rpython/module pypy/rpython/ootypesystem pypy/rpython/test pypy/translator pypy/translator/backendopt/test pypy/translator/c pypy/translator/c/gcc pypy/translator/c/test Message-ID: <20100907172237.8EE22282BDD@codespeak.net> Author: hakanardo Date: Tue Sep 7 19:22:31 2010 New Revision: 76926 Modified: pypy/branch/jit-bounds/ (props changed) pypy/branch/jit-bounds/lib_pypy/_ctypes/array.py pypy/branch/jit-bounds/lib_pypy/_ctypes/function.py pypy/branch/jit-bounds/lib_pypy/_ctypes/primitive.py pypy/branch/jit-bounds/pypy/annotation/binaryop.py pypy/branch/jit-bounds/pypy/annotation/bookkeeper.py pypy/branch/jit-bounds/pypy/annotation/builtin.py pypy/branch/jit-bounds/pypy/annotation/classdef.py pypy/branch/jit-bounds/pypy/annotation/model.py pypy/branch/jit-bounds/pypy/annotation/test/test_annrpython.py pypy/branch/jit-bounds/pypy/bin/py.py pypy/branch/jit-bounds/pypy/interpreter/argument.py pypy/branch/jit-bounds/pypy/interpreter/baseobjspace.py pypy/branch/jit-bounds/pypy/interpreter/pycode.py pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py pypy/branch/jit-bounds/pypy/jit/codewriter/jtransform.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (props changed) pypy/branch/jit-bounds/pypy/module/__builtin__/interp_classobj.py 
pypy/branch/jit-bounds/pypy/module/__builtin__/test/test_classobj.py pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py pypy/branch/jit-bounds/pypy/module/array/benchmark/Makefile (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimg.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/loop.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sum.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/jit-bounds/pypy/module/cpyext/api.py pypy/branch/jit-bounds/pypy/module/posix/test/test_posix2.py pypy/branch/jit-bounds/pypy/objspace/std/objspace.py pypy/branch/jit-bounds/pypy/objspace/std/tupleobject.py pypy/branch/jit-bounds/pypy/rlib/rarithmetic.py pypy/branch/jit-bounds/pypy/rlib/rsocket.py pypy/branch/jit-bounds/pypy/rlib/rweakref.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llgroup.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llmemory.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lloperation.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lltype.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/opimpl.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_llgroup.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lloperation.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lltype.py pypy/branch/jit-bounds/pypy/rpython/memory/gc/base.py pypy/branch/jit-bounds/pypy/rpython/memory/gc/generation.py pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py pypy/branch/jit-bounds/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py pypy/branch/jit-bounds/pypy/rpython/memory/gcwrapper.py pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gc.py pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gctypelayout.py pypy/branch/jit-bounds/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/jit-bounds/pypy/rpython/module/ll_time.py pypy/branch/jit-bounds/pypy/rpython/ootypesystem/ootype.py pypy/branch/jit-bounds/pypy/rpython/ootypesystem/rclass.py pypy/branch/jit-bounds/pypy/rpython/rbuiltin.py pypy/branch/jit-bounds/pypy/rpython/rclass.py pypy/branch/jit-bounds/pypy/rpython/test/test_rclass.py pypy/branch/jit-bounds/pypy/translator/backendopt/test/test_constfold.py pypy/branch/jit-bounds/pypy/translator/c/database.py pypy/branch/jit-bounds/pypy/translator/c/gc.py pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py pypy/branch/jit-bounds/pypy/translator/c/node.py pypy/branch/jit-bounds/pypy/translator/c/test/test_newgc.py pypy/branch/jit-bounds/pypy/translator/exceptiontransform.py Log: svn merge -r76816:r76924 svn+ssh://hakanardo at codespeak.net/svn/pypy/trunk Modified: pypy/branch/jit-bounds/lib_pypy/_ctypes/array.py ============================================================================== --- pypy/branch/jit-bounds/lib_pypy/_ctypes/array.py (original) +++ pypy/branch/jit-bounds/lib_pypy/_ctypes/array.py Tue Sep 7 19:22:31 2010 @@ -75,7 +75,7 @@ def _CData_output(self, resarray, base=None, index=-1): # this seems to be a string if we're array of char, surprise! 
- from ctypes import c_char, c_wchar, c_char_p, c_wchar_p + from ctypes import c_char, c_wchar if self._type_ is c_char: return _rawffi.charp2string(resarray.buffer, self._length_) if self._type_ is c_wchar: Modified: pypy/branch/jit-bounds/lib_pypy/_ctypes/function.py ============================================================================== --- pypy/branch/jit-bounds/lib_pypy/_ctypes/function.py (original) +++ pypy/branch/jit-bounds/lib_pypy/_ctypes/function.py Tue Sep 7 19:22:31 2010 @@ -60,7 +60,6 @@ return self._restype_ def _setrestype(self, restype): self._ptr = None - from ctypes import c_char_p if restype is int: from ctypes import c_int restype = c_int @@ -214,9 +213,7 @@ @staticmethod def _guess_argtypes(args): - from _ctypes import _CData from ctypes import c_char_p, c_wchar_p, c_void_p, c_int - from ctypes import Array, Structure res = [] for arg in args: if hasattr(arg, '_as_parameter_'): Modified: pypy/branch/jit-bounds/lib_pypy/_ctypes/primitive.py ============================================================================== --- pypy/branch/jit-bounds/lib_pypy/_ctypes/primitive.py (original) +++ pypy/branch/jit-bounds/lib_pypy/_ctypes/primitive.py Tue Sep 7 19:22:31 2010 @@ -57,7 +57,6 @@ pyobj_container = GlobalPyobjContainer() def generic_xxx_p_from_param(cls, value): - from _ctypes import Array, _Pointer if value is None: return cls(None) if isinstance(value, basestring): @@ -119,8 +118,6 @@ result._ffiarray = ffiarray if tp == 'z': # c_char_p - from _ctypes import Array, _Pointer - def _getvalue(self): addr = self._buffer[0] if addr == 0: @@ -143,7 +140,7 @@ result.value = property(_getvalue, _setvalue) elif tp == 'Z': # c_wchar_p - from _ctypes import Array, _Pointer, _wstring_at + from _ctypes import _wstring_at def _getvalue(self): addr = self._buffer[0] if addr == 0: Modified: pypy/branch/jit-bounds/pypy/annotation/binaryop.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/binaryop.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/binaryop.py Tue Sep 7 19:22:31 2010 @@ -924,10 +924,10 @@ class __extend__(pairtype(SomeAddress, SomeAddress)): def union((s_addr1, s_addr2)): - return SomeAddress(is_null=s_addr1.is_null and s_addr2.is_null) + return SomeAddress() def sub((s_addr1, s_addr2)): - if s_addr1.is_null and s_addr2.is_null: + if s_addr1.is_null_address() and s_addr2.is_null_address(): return getbookkeeper().immutablevalue(0) return SomeInteger() @@ -953,10 +953,10 @@ class __extend__(pairtype(SomeAddress, SomeInteger)): def add((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() def sub((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): # need to override this specifically to hide the 'raise UnionError' Modified: pypy/branch/jit-bounds/pypy/annotation/bookkeeper.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/bookkeeper.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/bookkeeper.py Tue Sep 7 19:22:31 2010 @@ -338,8 +338,12 @@ result = SomeBool() elif tp is int: result = SomeInteger(nonneg = x>=0) - elif tp is long and 0 <= x <= (sys.maxint * 2 + 1): - result = SomeInteger(unsigned = True) + elif tp is long: + if -sys.maxint-1 <= x <= sys.maxint: + x = int(x) + result = SomeInteger(nonneg = x>=0) + else: + raise Exception("seeing a prebuilt long (value %s)" % hex(x)) elif 
issubclass(tp, str): # py.lib uses annotated str subclasses if len(x) == 1: result = SomeChar() @@ -431,7 +435,7 @@ elif isinstance(x, lltype._ptr): result = SomePtr(lltype.typeOf(x)) elif isinstance(x, llmemory.fakeaddress): - result = SomeAddress(is_null=not x) + result = SomeAddress() elif isinstance(x, ootype._static_meth): result = SomeOOStaticMeth(ootype.typeOf(x)) elif isinstance(x, ootype._class): Modified: pypy/branch/jit-bounds/pypy/annotation/builtin.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/builtin.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/builtin.py Tue Sep 7 19:22:31 2010 @@ -92,6 +92,8 @@ return s_obj.is_true() def builtin_int(s_obj, s_base=None): + if isinstance(s_obj, SomeInteger): + assert not s_obj.unsigned, "instead of int(r_uint(x)), use intmask(r_uint(x))" assert (s_base is None or isinstance(s_base, SomeInteger) and s_obj.knowntype == str), "only int(v|string) or int(string,int) expected" if s_base is not None: @@ -694,18 +696,14 @@ def raw_free(s_addr): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null def raw_memclear(s_addr, s_int): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null assert isinstance(s_int, SomeInteger) def raw_memcopy(s_addr1, s_addr2, s_int): assert isinstance(s_addr1, SomeAddress) - assert not s_addr1.is_null assert isinstance(s_addr2, SomeAddress) - assert not s_addr2.is_null assert isinstance(s_int, SomeInteger) #XXX add noneg...? BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc Modified: pypy/branch/jit-bounds/pypy/annotation/classdef.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/classdef.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/classdef.py Tue Sep 7 19:22:31 2010 @@ -276,6 +276,8 @@ # create the Attribute and do the generalization asked for newattr = Attribute(attr, self.bookkeeper) if s_value: + if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): + import pdb; pdb.set_trace() newattr.s_value = s_value # keep all subattributes' values Modified: pypy/branch/jit-bounds/pypy/annotation/model.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/model.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/model.py Tue Sep 7 19:22:31 2010 @@ -500,12 +500,13 @@ class SomeAddress(SomeObject): immutable = True - def __init__(self, is_null=False): - self.is_null = is_null def can_be_none(self): return False + def is_null_address(self): + return self.is_immutable_constant() and not self.const + # The following class is used to annotate the intermediate value that # appears in expressions of the form: # addr.signed[offset] and addr.signed[offset] = value Modified: pypy/branch/jit-bounds/pypy/annotation/test/test_annrpython.py ============================================================================== --- pypy/branch/jit-bounds/pypy/annotation/test/test_annrpython.py (original) +++ pypy/branch/jit-bounds/pypy/annotation/test/test_annrpython.py Tue Sep 7 19:22:31 2010 @@ -767,7 +767,6 @@ assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError) # KeyError ignored because l is a list def test_overrides(self): - import sys excs = [] def record_exc(e): """NOT_RPYTHON""" @@ -869,8 +868,27 @@ def f(): return large_constant a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, []) + # if you want to get a r_uint, you 
have to be explicit about it + + def test_prebuilt_long_that_is_not_too_long(self): + small_constant = 12L + def f(): + return small_constant + a = self.RPythonAnnotator() s = a.build_types(f, []) - assert s.knowntype == r_uint + assert s.const == 12 + assert s.nonneg + assert not s.unsigned + # + small_constant = -23L + def f(): + return small_constant + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == -23 + assert not s.nonneg + assert not s.unsigned def test_pbc_getattr(self): class C: @@ -1386,7 +1404,6 @@ assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) def test_sys_attrs(self): - import sys def f(): return sys.argv[0] a = self.RPythonAnnotator() Modified: pypy/branch/jit-bounds/pypy/bin/py.py ============================================================================== --- pypy/branch/jit-bounds/pypy/bin/py.py (original) +++ pypy/branch/jit-bounds/pypy/bin/py.py Tue Sep 7 19:22:31 2010 @@ -76,6 +76,8 @@ config.objspace.suggest(allworkingmodules=False) if config.objspace.allworkingmodules: pypyoption.enable_allworkingmodules(config) + if config.objspace.usemodules.thread: + config.translation.thread = True # create the object space Modified: pypy/branch/jit-bounds/pypy/interpreter/argument.py ============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/argument.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/argument.py Tue Sep 7 19:22:31 2010 @@ -52,11 +52,15 @@ self.argnames, self.varargname, self.kwargname) def __eq__(self, other): + if not isinstance(other, Signature): + return NotImplemented return (self.argnames == other.argnames and self.varargname == other.varargname and self.kwargname == other.kwargname) def __ne__(self, other): + if not isinstance(other, Signature): + return NotImplemented return not self == other Modified: pypy/branch/jit-bounds/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/baseobjspace.py Tue Sep 7 19:22:31 2010 @@ -288,6 +288,7 @@ self.timer.stop("startup " + modname) def finish(self): + self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: self.call_function(w_exitfunc) @@ -305,6 +306,23 @@ for s in self.FrameClass._space_op_types: print s + def wait_for_thread_shutdown(self): + """Wait until threading._shutdown() completes, provided the threading + module was imported in the first place. 
The shutdown routine will + wait until all non-daemon 'threading' threads have completed.""" + if not self.config.translation.thread: + return + + w_modules = self.sys.get('modules') + w_mod = self.finditem_str(w_modules, 'threading') + if w_mod is None: + return + + try: + self.call_method(w_mod, "_shutdown") + except OperationError, e: + e.write_unraisable(self, "threading._shutdown()") + def reportbytecodecounts(self): os.write(2, "Starting bytecode report.\n") fd = os.open('bytecode.txt', os.O_CREAT|os.O_WRONLY|os.O_TRUNC, 0644) Modified: pypy/branch/jit-bounds/pypy/interpreter/pycode.py ============================================================================== --- pypy/branch/jit-bounds/pypy/interpreter/pycode.py (original) +++ pypy/branch/jit-bounds/pypy/interpreter/pycode.py Tue Sep 7 19:22:31 2010 @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. """ -import dis, imp, struct, types, new +import dis, imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.argument import Signature @@ -118,7 +118,8 @@ self._compute_flatcall() def _freeze_(self): - if self.magic == cpython_magic: + if (self.magic == cpython_magic and + '__pypy__' not in sys.builtin_module_names): raise Exception("CPython host codes should not be rendered") return False Modified: pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py Tue Sep 7 19:22:31 2010 @@ -328,7 +328,7 @@ DEBUG = False # forced to True by x86/test/test_zrpy_gc.py def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import _check_typeid + from pypy.rpython.memory.gctypelayout import check_typeid from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rpython.memory.gctransform import framework GcLLDescription.__init__(self, gcdescr, translator, rtyper) @@ -375,7 +375,7 @@ def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) has_finalizer = bool(tid & (1<= 8 and name[0] == '_': @@ -363,19 +382,11 @@ return self.w_dict elif name == "__class__": return self.w_class - try: - return self.getattr(space, w_attr) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - w_meth = self.getattr(space, space.wrap('__getattr__'), False) - if w_meth is not None: - return space.call_function(w_meth, w_attr) - raise + return self.getattr(space, w_attr) def descr_setattr(self, space, w_name, w_value): name = unwrap_attr(space, w_name) - w_meth = self.getattr(space, space.wrap('__setattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__setattr__')) if name and name[0] == "_": if name == '__dict__': self.setdict(space, w_value) @@ -405,7 +416,7 @@ # use setclass to raise the error self.setclass(space, None) return - w_meth = self.getattr(space, space.wrap('__delattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__delattr__')) if w_meth is not None: space.call_function(w_meth, w_name) else: @@ -658,7 +669,10 @@ def descr_del(self, space): # Note that this is called from executioncontext.UserDelAction # via the space.userdel() method. 
- w_func = self.getattr(space, space.wrap('__del__'), False) + w_name = space.wrap('__del__') + w_func = space.finditem(self.w_dict, w_name) + if w_func is None: + w_func = self.getattr_from_class(space, w_name) if w_func is not None: space.call_function(w_func) Modified: pypy/branch/jit-bounds/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/jit-bounds/pypy/module/__builtin__/test/test_classobj.py Tue Sep 7 19:22:31 2010 @@ -767,6 +767,168 @@ finally: warnings.simplefilter('default', RuntimeWarning) + def test_special_method_via_getattr(self): + class A: + def __getattr__(self, attr): + print 'A getattr:', attr + def callable(*args): + print 'A called:', attr + repr(args) + return attr + repr(args) + return callable + class B: + def __getattr__(self, attr): + print 'B getattr:', attr + def callable(*args): + print 'B called:', attr, args + self.called = attr, args + if attr == '__coerce__': + return self, args[0] + return 42 + return callable + a = A() + a.instancevalue = 42 # does not go via __getattr__('__setattr__') + a.__getattr__ = "hi there, ignore me, I'm in a" + a.__setattr__ = "hi there, ignore me, I'm in a too" + assert a.instancevalue == 42 + A.classvalue = 123 + assert a.classvalue == 123 + assert a.foobar(5) == 'foobar(5,)' + assert a.__dict__ == {'instancevalue': 42, + '__getattr__': a.__getattr__, + '__setattr__': a.__setattr__} + assert a.__class__ is A + # This follows the Python 2.5 rules, more precisely. + # It is still valid in Python 2.7 too. + assert repr(a) == '__repr__()' + assert str(a) == '__str__()' + assert unicode(a) == u'__unicode__()' + b = B() + b.__getattr__ = "hi there, ignore me, I'm in b" + b.__setattr__ = "hi there, ignore me, I'm in b too" + assert 'called' not in b.__dict__ # and not e.g. ('__init__', ()) + assert len(b) == 42 + assert b.called == ('__len__', ()) + assert a[5] == '__getitem__(5,)' + b[6] = 7 + assert b.called == ('__setitem__', (6, 7)) + del b[8] + assert b.called == ('__delitem__', (8,)) + # + class C: + def __getattr__(self, name): + if name == '__iter__': + return lambda: iter([3, 33, 333]) + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + class C: + def __getattr__(self, name): + if name == '__getitem__': + return lambda n: [3, 33, 333][n] + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + assert a[:6] == '__getslice__(0, 6)' + b[3:5] = 7 + assert b.called == ('__setslice__', (3, 5, 7)) + del b[:-1000] + assert b.called == ('__delslice__', (0, -958)) # adds len(b)... + assert a(5) == '__call__(5,)' + raises(TypeError, bool, a) # "should return an int" + assert not not b + # + class C: + def __getattr__(self, name): + if name == '__nonzero__': + return lambda: False + raise AttributeError + assert not C() + # + class C: + def __getattr__(self, name): + if name == '__len__': + return lambda: 0 + raise AttributeError + assert not C() + # + #assert cmp(b, 43) == 0 # because __eq__(43) returns 42, so True... + # ... 
I will leave this case as XXX implement me + assert hash(b) == 42 + assert range(100, 200)[b] == 142 + assert "foo" in b + # + class C: + def __iter__(self): + return self + def __getattr__(self, name): + if name == 'next': + return lambda: 'the next item' + raise AttributeError + for x in C(): + assert x == 'the next item' + break + # + # XXX a really corner case: '__del__' + # + import operator + op_by_name = {"neg": operator.neg, + "pos": operator.pos, + "abs": abs, + "invert": operator.invert, + "int": int, + "long": long} + for opname, opfunc in op_by_name.items(): + assert opfunc(b) == 42 + assert b.called == ("__" + opname + "__", ()) + assert oct(a) == '__oct__()' + assert hex(a) == '__hex__()' + # + class C: + def __getattr__(self, name): + return lambda: 5.5 + raises(TypeError, float, b) + assert float(C()) == 5.5 + # + op_by_name = {'eq': operator.eq, + 'ne': operator.ne, + 'gt': operator.gt, + 'lt': operator.lt, + 'ge': operator.ge, + 'le': operator.le, + 'imod': operator.imod, + 'iand': operator.iand, + 'ipow': operator.ipow, + 'itruediv': operator.itruediv, + 'ilshift': operator.ilshift, + 'ixor': operator.ixor, + 'irshift': operator.irshift, + 'ifloordiv': operator.ifloordiv, + 'idiv': operator.idiv, + 'isub': operator.isub, + 'imul': operator.imul, + 'iadd': operator.iadd, + 'ior': operator.ior, + 'or': operator.or_, + 'and': operator.and_, + 'xor': operator.xor, + 'lshift': operator.lshift, + 'rshift': operator.rshift, + 'add': operator.add, + 'sub': operator.sub, + 'mul': operator.mul, + 'div': operator.div, + 'mod': operator.mod, + 'divmod': divmod, + 'floordiv': operator.floordiv, + 'truediv': operator.truediv} + for opname, opfunc in op_by_name.items(): + assert opfunc(b, 5) == 42 + assert b.called == ("__" + opname + "__", (5,)) + x, y = coerce(b, 5) + assert x is b + assert y == 5 + + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withsharingdict": True}) Modified: pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/jit-bounds/pypy/module/_socket/test/test_sock_app.py Tue Sep 7 19:22:31 2010 @@ -221,6 +221,21 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info +def test_unknown_addr_as_object(): + from pypy.rlib import rsocket + from pypy.rpython.lltypesystem import lltype, rffi + + c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') + c_addr.c_sa_data[0] = 'c' + rffi.setintfield(c_addr, 'c_sa_family', 15) + # XXX what size to pass here? 
for the purpose of this test it has + # to be short enough so we have some data, 1 sounds good enough + # + sizeof USHORT + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + assert space.is_true(space.isinstance(w_obj, space.w_tuple)) + assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 + assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' + def test_getnameinfo(): host = "127.0.0.1" port = 25 @@ -440,7 +455,6 @@ s2 = s.dup() assert s.fileno() != s2.fileno() assert s.getsockname() == s2.getsockname() - def test_buffer_or_unicode(self): # Test that send/sendall/sendto accept a buffer or a unicode as arg Modified: pypy/branch/jit-bounds/pypy/module/cpyext/api.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/cpyext/api.py (original) +++ pypy/branch/jit-bounds/pypy/module/cpyext/api.py Tue Sep 7 19:22:31 2010 @@ -62,11 +62,15 @@ VA_LIST_P = rffi.VOIDP # rffi.COpaquePtr('va_list') CONST_STRING = lltype.Ptr(lltype.Array(lltype.Char, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) CONST_WSTRING = lltype.Ptr(lltype.Array(lltype.UniChar, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) assert CONST_STRING is not rffi.CCHARP +assert CONST_STRING == rffi.CCHARP assert CONST_WSTRING is not rffi.CWCHARP +assert CONST_WSTRING == rffi.CWCHARP # FILE* interface FILEP = rffi.COpaquePtr('FILE') Modified: pypy/branch/jit-bounds/pypy/module/posix/test/test_posix2.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/posix/test/test_posix2.py (original) +++ pypy/branch/jit-bounds/pypy/module/posix/test/test_posix2.py Tue Sep 7 19:22:31 2010 @@ -324,15 +324,12 @@ if hasattr(__import__(os.name), "openpty"): def test_openpty(self): os = self.posix - master_fd, slave_fd = self.posix.openpty() - try: - assert isinstance(master_fd, int) - assert isinstance(slave_fd, int) - os.write(slave_fd, 'x') - assert os.read(master_fd, 1) == 'x' - finally: - os.close(master_fd) - os.close(slave_fd) + master_fd, slave_fd = os.openpty() + assert isinstance(master_fd, int) + assert isinstance(slave_fd, int) + os.write(slave_fd, 'x\n') + data = os.read(master_fd, 100) + assert data.startswith('x') if hasattr(__import__(os.name), "execv"): Modified: pypy/branch/jit-bounds/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/jit-bounds/pypy/objspace/std/objspace.py (original) +++ pypy/branch/jit-bounds/pypy/objspace/std/objspace.py Tue Sep 7 19:22:31 2010 @@ -405,8 +405,8 @@ def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: return DescrOperation.getattr(self, w_obj, w_name) - # an optional shortcut for performance + w_type = self.type(w_obj) w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: Modified: pypy/branch/jit-bounds/pypy/objspace/std/tupleobject.py ============================================================================== --- pypy/branch/jit-bounds/pypy/objspace/std/tupleobject.py (original) +++ pypy/branch/jit-bounds/pypy/objspace/std/tupleobject.py Tue Sep 7 19:22:31 2010 @@ -10,7 +10,7 @@ class W_TupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef - _immutable_ = True + _immutable_fields_ = ['wrappeditems[*]'] def __init__(w_self, wrappeditems): make_sure_not_resized(wrappeditems) Modified: 
pypy/branch/jit-bounds/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rarithmetic.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rarithmetic.py Tue Sep 7 19:22:31 2010 @@ -74,7 +74,7 @@ def widen(n): from pypy.rpython.lltypesystem import lltype if _should_widen_type(lltype.typeOf(n)): - return int(n) + return intmask(n) else: return n widen._annspecialcase_ = 'specialize:argtype(0)' Modified: pypy/branch/jit-bounds/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rsocket.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rsocket.py Tue Sep 7 19:22:31 2010 @@ -100,8 +100,6 @@ def lock(self, TYPE=_c.sockaddr): """Return self.addr_p, cast as a pointer to TYPE. Must call unlock()! """ - if not (self.minlen <= self.addrlen <= self.maxlen): - raise RSocketError("invalid address") return rffi.cast(lltype.Ptr(TYPE), self.addr_p) lock._annspecialcase_ = 'specialize:ll' Modified: pypy/branch/jit-bounds/pypy/rlib/rweakref.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rweakref.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rweakref.py Tue Sep 7 19:22:31 2010 @@ -78,7 +78,7 @@ return self.__class__, def method_get(self, s_key): - assert isinstance(s_key, annmodel.SomeString) + assert annmodel.SomeString(can_be_None=True).contains(s_key) return annmodel.SomeInstance(self.valueclassdef, can_be_None=True) def method_set(self, s_key, s_value): Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llgroup.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llgroup.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llgroup.py Tue Sep 7 19:22:31 2010 @@ -99,6 +99,7 @@ '&~0xFFFF' or with a direct masking like '&0x10000' (resp. on 64-bit platform, with '&~0xFFFFFFFF' or '&0x100000000'). 
""" + __slots__ = ['lowpart', 'rest'] MASK = (1<= HALFSHIFT + return self.rest >> other + def __eq__(self, other): if (isinstance(other, CombinedSymbolic) and self.lowpart is other.lowpart): Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/llmemory.py Tue Sep 7 19:22:31 2010 @@ -361,19 +361,27 @@ # ____________________________________________________________ +def _sizeof_none(TYPE): + assert not TYPE._is_varsize() + return ItemOffset(TYPE) +_sizeof_none._annspecialcase_ = 'specialize:memo' + +def _sizeof_int(TYPE, n): + "NOT_RPYTHON" + if isinstance(TYPE, lltype.Struct): + return FieldOffset(TYPE, TYPE._arrayfld) + \ + itemoffsetof(TYPE._flds[TYPE._arrayfld], n) + else: + raise Exception("don't know how to take the size of a %r"%TYPE) + def sizeof(TYPE, n=None): if n is None: - assert not TYPE._is_varsize() - return ItemOffset(TYPE) + return _sizeof_none(TYPE) + elif isinstance(TYPE, lltype.Array): + return itemoffsetof(TYPE) + _sizeof_none(TYPE.OF) * n else: - if isinstance(TYPE, lltype.Array): - return itemoffsetof(TYPE, n) - elif isinstance(TYPE, lltype.Struct): - return FieldOffset(TYPE, TYPE._arrayfld) + \ - itemoffsetof(TYPE._flds[TYPE._arrayfld], n) - else: - raise Exception("don't know how to take the size of a %r"%TYPE) -sizeof._annspecialcase_ = 'specialize:memo' # only for n == None + return _sizeof_int(TYPE, n) +sizeof._annspecialcase_ = 'specialize:arg(0)' def offsetof(TYPE, fldname): assert fldname in TYPE._flds @@ -389,6 +397,7 @@ # ------------------------------------------------------------- class fakeaddress(object): + __slots__ = ['ptr'] # NOTE: the 'ptr' in the addresses must be normalized. # Use cast_ptr_to_adr() instead of directly fakeaddress() if unsure. def __init__(self, ptr): @@ -530,7 +539,6 @@ pass NULL = fakeaddress(None) -NULL.intaddress = 0 # this is to make memory.lladdress more happy Address = lltype.Primitive("Address", NULL) # GCREF is similar to Address but it is GC-aware Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lloperation.py Tue Sep 7 19:22:31 2010 @@ -85,16 +85,20 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return (self.canfold or # canfold => pure operation - self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable - (self in (llop.getfield, llop.getarrayitem) and - args_v[0].concretetype.TO._hints.get('immutable')) or - (self is llop.getfield and # reading from immutable_field - 'immutable_fields' in args_v[0].concretetype.TO._hints and - args_v[1].value in args_v[0].concretetype.TO - ._hints['immutable_fields'].fields)) - # XXX: what about ootype immutable arrays? + if self.canfold: # canfold => pure operation + return True + if self is llop.debug_assert: # debug_assert is pure enough + return True + # reading from immutable (lltype) + if self is llop.getfield or self is llop.getarrayitem: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype.TO._immutable_field(field) + # reading from immutable (ootype) (xxx what about arrays?) 
+ if self is llop.oogetfield: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype._immutable_field(field) + # default + return False def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/lltype.py Tue Sep 7 19:22:31 2010 @@ -37,7 +37,7 @@ return ''%(self.TYPE,) -def saferecursive(func, defl): +def saferecursive(func, defl, TLS=TLS): def safe(*args): try: seeing = TLS.seeing @@ -54,7 +54,7 @@ return safe #safe_equal = saferecursive(operator.eq, True) -def safe_equal(x, y): +def safe_equal(x, y, TLS=TLS): # a specialized version for performance try: seeing = TLS.seeing_eq @@ -97,7 +97,7 @@ raise TypeError return value - def __hash__(self): + def __hash__(self, TLS=TLS): # cannot use saferecursive() -- see test_lltype.test_hash(). # NB. the __cached_hash should neither be used nor updated # if we enter with hash_level > 0, because the computed @@ -297,6 +297,15 @@ n = 1 return _struct(self, n, initialization='example') + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) + class RttiStruct(Struct): _runtime_type_info = None @@ -391,6 +400,9 @@ def _container_example(self): return _array(self, 1, initialization='example') + def _immutable_field(self, index=None): + return self._hints.get('immutable', False) + class GcArray(Array): _gckind = 'gc' def _inline_is_varsize(self, last): @@ -401,6 +413,19 @@ # behaves more or less like a Struct with fields item0, item1, ... # but also supports __getitem__(), __setitem__(), __len__(). + _cache = weakref.WeakValueDictionary() # cache the length-1 FixedSizeArrays + def __new__(cls, OF, length, **kwds): + if length == 1 and not kwds: + try: + obj = FixedSizeArray._cache[OF] + except KeyError: + obj = FixedSizeArray._cache[OF] = Struct.__new__(cls) + except TypeError: + obj = Struct.__new__(cls) + else: + obj = Struct.__new__(cls) + return obj + def __init__(self, OF, length, **kwds): fields = [('item%d' % i, OF) for i in range(length)] super(FixedSizeArray, self).__init__('array%d' % length, *fields, @@ -610,11 +635,22 @@ class Ptr(LowLevelType): __name__ = property(lambda self: '%sPtr' % self.TO.__name__) - def __init__(self, TO): + _cache = weakref.WeakValueDictionary() # cache the Ptrs + def __new__(cls, TO, use_cache=True): if not isinstance(TO, ContainerType): raise TypeError, ("can only point to a Container type, " "not to %s" % (TO,)) - self.TO = TO + if not use_cache: + obj = LowLevelType.__new__(cls) + else: + try: + return Ptr._cache[TO] + except KeyError: + obj = Ptr._cache[TO] = LowLevelType.__new__(cls) + except TypeError: + obj = LowLevelType.__new__(cls) + obj.TO = TO + return obj def _needsgc(self): # XXX deprecated interface Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/opimpl.py Tue Sep 7 19:22:31 2010 @@ -150,12 +150,7 @@ # we can constant-fold this if the innermost structure from which we # read the final field is immutable. 
T = lltype.typeOf(innermostcontainer).TO - if T._hints.get('immutable'): - pass - elif ('immutable_fields' in T._hints and - offsets[-1] in T._hints['immutable_fields'].fields): - pass - else: + if not T._immutable_field(offsets[-1]): raise TypeError("cannot fold getinteriorfield on mutable struct") assert not isinstance(ob, lltype._interior_ptr) return ob @@ -197,6 +192,18 @@ assert isinstance(y, int) return intmask(x - y) +def op_int_ge(x, y): + # special case for 'AddressOffset >= 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x >= y + +def op_int_lt(x, y): + # special case for 'AddressOffset < 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x < y + def op_int_between(a, b, c): assert lltype.typeOf(a) is lltype.Signed assert lltype.typeOf(b) is lltype.Signed @@ -222,6 +229,13 @@ assert isinstance(y, (int, llmemory.AddressOffset)) return intmask(x * y) +def op_int_rshift(x, y): + if not isinstance(x, int): + from pypy.rpython.lltypesystem import llgroup + assert isinstance(x, llgroup.CombinedSymbolic) + assert isinstance(y, int) + return x >> y + def op_int_floordiv(x, y): assert isinstance(x, (int, llmemory.AddressOffset)) assert isinstance(y, (int, llmemory.AddressOffset)) @@ -418,19 +432,15 @@ def op_getfield(p, name): checkptr(p) TYPE = lltype.typeOf(p).TO - if TYPE._hints.get('immutable'): - pass - elif ('immutable_fields' in TYPE._hints and - name in TYPE._hints['immutable_fields'].fields): - pass - else: + if not TYPE._immutable_field(name): raise TypeError("cannot fold getfield on mutable struct") return getattr(p, name) def op_getarrayitem(p, index): checkptr(p) - if not lltype.typeOf(p).TO._hints.get('immutable'): - raise TypeError("cannot fold getfield on mutable array") + ARRAY = lltype.typeOf(p).TO + if not ARRAY._immutable_field(index): + raise TypeError("cannot fold getarrayitem on mutable array") return p[index] def _normalize(x): Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py Tue Sep 7 19:22:31 2010 @@ -593,9 +593,12 @@ """ str -> char* """ array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw') - for i in range(len(s)): + i = len(s) + array[i] = lastchar + i -= 1 + while i >= 0: array[i] = s[i] - array[len(s)] = lastchar + i -= 1 return array str2charp._annenforceargs_ = [strtype] Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_llgroup.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_llgroup.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_llgroup.py Tue Sep 7 19:22:31 2010 @@ -105,6 +105,8 @@ assert p == test.p1b assert cslist[0] & ~MASK == 0x45 << HALFSHIFT assert cslist[1] & ~MASK == 0x41 << HALFSHIFT + assert cslist[0] >> HALFSHIFT == 0x45 + assert cslist[1] >> (HALFSHIFT+1) == 0x41 >> 1 # return 42 return f Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lloperation.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lloperation.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lloperation.py Tue Sep 7 19:22:31 2010 @@ -88,7 +88,7 @@ accessor = 
rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) v_s3 = Variable() v_s3.concretetype = lltype.Ptr(S3) assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) @@ -103,7 +103,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lltype.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lltype.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_lltype.py Tue Sep 7 19:22:31 2010 @@ -781,6 +781,28 @@ p = cast_opaque_ptr(llmemory.GCREF, a) assert hash1 == identityhash(p) +def test_immutable_hint(): + S = GcStruct('S', ('x', lltype.Signed)) + assert S._immutable_field('x') == False + # + S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' + + class TestTrackAllocation: def setup_method(self, func): start_tracking_allocations() Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gc/base.py Tue Sep 7 19:22:31 2010 @@ -86,8 +86,7 @@ addr -= self.gcheaderbuilder.size_gc_header return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - def get_size(self, obj): - typeid = self.get_type_id(obj) + def _get_size_for_typeid(self, obj, typeid): size = self.fixed_size(typeid) if self.is_varsize(typeid): lenaddr = obj + self.varsize_offset_to_length(typeid) @@ -99,6 +98,9 @@ # gctypelayout.encode_type_shape() return size + def get_size(self, obj): + return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. 
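
The base.py hunk that follows adds a read_from_env() helper so that the different collectors parse size-like environment variables (PYPY_GENERATIONGC_NURSERY, and later in this patch PYPY_MARKCOMPACTGC_MAX / PYPY_MARKCOMPACTGC_MIN) in one place, accepting plain byte counts as well as k/K, m/M and g/G suffixes and returning -1 when the variable is unset or unparsable. A minimal, self-contained sketch of that behaviour (the helper's purpose and the -1 convention are taken from the patch; the function name and the example variable values here are illustrative only):

    import os

    def read_size_from_env(varname):
        # Sketch of the read_from_env() helper added in this patch:
        # "16k" -> 16384, "8m" -> 8388608, "4096" -> 4096.
        # Returns -1 if the variable is unset or cannot be parsed.
        value = os.environ.get(varname)
        if not value:
            return -1
        factor = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}.get(value[-1].lower())
        if factor is None:
            factor, number = 1, value
        else:
            number = value[:-1]
        try:
            return int(float(number) * factor)
        except ValueError:
            return -1

    # e.g. the generational GC now reads its nursery size roughly like this:
    os.environ['PYPY_GENERATIONGC_NURSERY'] = '4m'
    assert read_size_from_env('PYPY_GENERATIONGC_NURSERY') == 4 * 1024 * 1024
    assert read_size_from_env('SOME_UNSET_VARIABLE') == -1
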
@@ -218,7 +220,6 @@ pending = self._debug_pending while pending.non_empty(): obj = pending.pop() - self.debug_check_object(obj) self.trace(obj, self._debug_callback2, None) self._debug_seen.delete() self._debug_pending.delete() @@ -227,6 +228,7 @@ seen = self._debug_seen if not seen.contains(obj): seen.add(obj) + self.debug_check_object(obj) self._debug_pending.append(obj) def _debug_callback(self, root): obj = root.address[0] @@ -348,3 +350,23 @@ globals(), locals(), [classname]) GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS + +def read_from_env(varname): + import os + value = os.environ.get(varname) + if value: + realvalue = value[:-1] + if value[-1] in 'kK': + factor = 1024 + elif value[-1] in 'mM': + factor = 1024*1024 + elif value[-1] in 'gG': + factor = 1024*1024*1024 + else: + factor = 1 + realvalue = value + try: + return int(float(realvalue) * factor) + except ValueError: + pass + return -1 Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gc/generation.py Tue Sep 7 19:22:31 2010 @@ -2,6 +2,7 @@ from pypy.rpython.memory.gc.semispace import SemiSpaceGC from pypy.rpython.memory.gc.semispace import GCFLAG_EXTERNAL, GCFLAG_FORWARDED from pypy.rpython.memory.gc.semispace import GC_HASH_TAKEN_ADDR +from pypy.rpython.memory.gc.base import read_from_env from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage from pypy.rpython.lltypesystem import lltype, llmemory, llarena from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -625,18 +626,7 @@ import os def nursery_size_from_env(): - value = os.environ.get('PYPY_GENERATIONGC_NURSERY') - if value: - if value[-1] in 'kK': - factor = 1024 - value = value[:-1] - else: - factor = 1 - try: - return int(value) * factor - except ValueError: - pass - return -1 + return read_from_env('PYPY_GENERATIONGC_NURSERY') def best_nursery_size_for_L2cache(L2cache): # Heuristically, the best nursery size to choose is about half Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gc/markcompact.py Tue Sep 7 19:22:31 2010 @@ -1,27 +1,17 @@ - -import time - from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup -from pypy.rpython.memory.gc.base import MovingGCBase +from pypy.rpython.memory.gc.base import MovingGCBase, read_from_env from pypy.rlib.debug import ll_assert, have_debug_prints from pypy.rlib.debug import debug_print, debug_start, debug_stop from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage -from pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, running_on_llinterp from pypy.rpython.lltypesystem import rffi from pypy.rpython.memory.gcheader import GCHeaderBuilder -first_gcflag = 1 << 16 -GCFLAG_MARKBIT = first_gcflag << 0 
-GCFLAG_HASHTAKEN = first_gcflag << 1 # someone already asked for the hash -GCFLAG_HASHFIELD = first_gcflag << 2 # we have an extra hash field - -memoryError = MemoryError() - # Mark'n'compact garbage collector # # main point of this GC is to save as much memory as possible @@ -34,41 +24,44 @@ # this gc works more or less like semispace, but has some essential # differencies. The main difference is that we have separate phases of # marking and assigning pointers, hence order of objects is preserved. -# This means we can reuse the same space if it did not grow enough. -# More importantly, in case we need to resize space we can copy it bit by -# bit, hence avoiding double memory consumption at peak times +# This means we can reuse the same space, overwriting it as we collect. -# so the algorithm itself is performed in 3 stages (module weakrefs and -# finalizers) +# so the algorithm itself is performed in 3 stages (modulo weakrefs and +# finalizers): # 1. We mark alive objects # 2. We walk all objects and assign forward pointers in the same order, # also updating all references -# 3. We compact the space by moving. In case we move to the same space, -# we use arena_new_view trick, which looks like new space to tests, -# but compiles to the same pointer. Also we use raw_memmove in case -# objects overlap. - -# Exact algorithm for space resizing: we keep allocated more space than needed -# (2x, can be even more), but it's full of zeroes. After each collection, -# we bump next_collect_after which is a marker where to start each collection. -# It should be exponential (but less than 2) from the size occupied by objects +# 3. We compact the space by moving. We use 'arena_new_view' trick, which +# looks like new space to tests, but compiles to the same pointer. +# Also we use raw_memmove in case the object overlaps with its destination. + +# After each collection, we bump 'next_collect_after' which is a marker +# where to start each collection. It should be exponential (but less +# than 2) from the size occupied by objects so far. # field optimization - we don't need forward pointer and flags at the same -# time. Instead we copy list of tids when we know how many objects are alive -# and store forward pointer there. +# time. Instead we copy the TIDs in a list when we know how many objects are +# alive, and store the forward pointer in the old object header. +first_gcflag_bit = LONG_BIT//2 +first_gcflag = 1 << first_gcflag_bit +GCFLAG_HASHTAKEN = first_gcflag << 0 # someone already asked for the hash +GCFLAG_HASHFIELD = first_gcflag << 1 # we have an extra hash field +# note that only the first 2 bits are preserved during a collection! 
+GCFLAG_MARKBIT = intmask(first_gcflag << (LONG_BIT//2-1)) +assert GCFLAG_MARKBIT < 0 # should be 0x80000000 + +GCFLAG_SAVED_HASHTAKEN = GCFLAG_HASHTAKEN >> first_gcflag_bit +GCFLAG_SAVED_HASHFIELD = GCFLAG_HASHFIELD >> first_gcflag_bit -# in case we need to grow space, we use -# current_space_size * FREE_SPACE_MULTIPLIER / FREE_SPACE_DIVIDER + needed -FREE_SPACE_MULTIPLIER = 3 -FREE_SPACE_DIVIDER = 2 -FREE_SPACE_ADD = 256 -# XXX adjust -GC_CLEARANCE = 32*1024 TID_TYPE = llgroup.HALFWORD BYTES_PER_TID = rffi.sizeof(TID_TYPE) +TID_BACKUP = rffi.CArray(TID_TYPE) + +def translated_to_c(): + return we_are_translated() and not running_on_llinterp class MarkCompactGC(MovingGCBase): @@ -77,37 +70,63 @@ withhash_flag_is_in_field = 'tid', GCFLAG_HASHFIELD # ^^^ all prebuilt objects have GCFLAG_HASHTAKEN, but only some have # GCFLAG_HASHFIELD (and then they are one word longer). - TID_BACKUP = lltype.Array(TID_TYPE, hints={'nolength':True}) - WEAKREF_OFFSETS = lltype.Array(lltype.Signed) + # The default space size is 1.9375 GB, i.e. almost 2 GB, allocated as + # a big mmap. The process does not actually consume that space until + # needed, of course. + TRANSLATION_PARAMS = {'space_size': int((1 + 15.0/16)*1024*1024*1024), + 'min_next_collect_after': 16*1024*1024} # 16MB - TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust - - malloc_zero_filled = True + malloc_zero_filled = False inline_simple_malloc = True inline_simple_malloc_varsize = True - first_unused_gcflag = first_gcflag << 3 - total_collection_time = 0.0 - total_collection_count = 0 - - def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096): - import py; py.test.skip("Disabled for now, sorry") - self.param_space_size = space_size + #total_collection_time = 0.0 + #total_collection_count = 0 + + free = NULL + next_collect_after = -1 + + def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096, + min_next_collect_after=128): MovingGCBase.__init__(self, config, chunk_size) + self.space_size = space_size + self.min_next_collect_after = min_next_collect_after - def setup(self): - self.space_size = self.param_space_size - self.next_collect_after = self.param_space_size/2 # whatever... + def next_collection(self, used_space, num_objects_so_far, requested_size): + used_space += BYTES_PER_TID * num_objects_so_far + ll_assert(used_space <= self.space_size, + "used_space + num_objects_so_far overflow") + try: + next = (used_space // 3) * 2 + requested_size + except OverflowError: + next = self.space_size + if next < self.min_next_collect_after: + next = self.min_next_collect_after + if next > self.space_size - used_space: + next = self.space_size - used_space + # The value we return guarantees that used_space + next <= space_size, + # with 'BYTES_PER_TID*num_objects_so_far' included in used_space. + # Normally, the value we return should also be at least requested_size + # unless we are out of memory. 
+ return next - self.program_start_time = time.time() - self.space = llarena.arena_malloc(self.space_size, True) - ll_assert(bool(self.space), "couldn't allocate arena") + def setup(self): + envsize = read_from_env('PYPY_MARKCOMPACTGC_MAX') + if envsize >= 4096: + self.space_size = envsize & ~4095 + mincollect = read_from_env('PYPY_MARKCOMPACTGC_MIN') + if mincollect >= 4096: + self.min_next_collect_after = mincollect + + #self.program_start_time = time.time() + self.space = llarena.arena_malloc(self.space_size, False) + if not self.space: + raise CannotAllocateGCArena self.free = self.space - self.top_of_space = self.space + self.next_collect_after MovingGCBase.setup(self) self.objects_with_finalizers = self.AddressDeque() - self.objects_with_weakrefs = self.AddressStack() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.next_collect_after = self.next_collection(0, 0, 0) def init_gc_object(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) @@ -115,216 +134,204 @@ def init_gc_object_immortal(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - flags |= GCFLAG_HASHTAKEN + flags |= GCFLAG_HASHTAKEN | GCFLAG_MARKBIT + # All prebuilt GC objects have the GCFLAG_MARKBIT always set. + # That's convenient to make the GC always think that they + # survive the current collection. hdr.tid = self.combine(typeid16, flags) - # XXX we can store forward_ptr to itself, if we fix C backend - # so that get_forwarding_address(obj) returns - # obj itself if obj is a prebuilt object - def malloc_fixedsize_clear(self, typeid16, size, can_collect, - has_finalizer=False, contains_weakptr=False): - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + size - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) + def _get_memory(self, totalsize): + # also counts the space that will be needed during the following + # collection to store the TID + requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID + self.next_collect_after -= requested_size + if self.next_collect_after < 0: + result = self.obtain_free_space(requested_size) + else: + result = self.free + self.free += totalsize llarena.arena_reserve(result, totalsize) + return result + _get_memory._always_inline_ = True + + def _get_totalsize_var(self, nonvarsize, itemsize, length): + try: + varsize = ovfcheck(itemsize * length) + except OverflowError: + raise MemoryError + # Careful to detect overflows. The following works even if varsize + # is almost equal to sys.maxint; morever, self.space_size is known + # to be at least 4095 bytes smaller than sys.maxint, so this function + # always raises instead of returning an integer >= sys.maxint-4095. 
+ if (raw_malloc_usage(varsize) > self.space_size - + raw_malloc_usage(nonvarsize)): + raise MemoryError + return llarena.round_up_for_allocation(nonvarsize + varsize) + _get_totalsize_var._always_inline_ = True + + def _setup_object(self, result, typeid16, has_finalizer): + size_gc_header = self.gcheaderbuilder.size_gc_header self.init_gc_object(result, typeid16) - self.free += totalsize if has_finalizer: self.objects_with_finalizers.append(result + size_gc_header) - if contains_weakptr: - self.objects_with_weakrefs.append(result + size_gc_header) return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) - + _setup_object._always_inline_ = True + + def malloc_fixedsize(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + return self._setup_object(result, typeid16, has_finalizer) + + def malloc_fixedsize_clear(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) + return self._setup_object(result, typeid16, has_finalizer) + def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length, can_collect): size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise memoryError - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid16) + totalsize = self._get_totalsize_var(nonvarsize, itemsize, length) + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) (result + size_gc_header + offset_to_length).signed[0] = length - self.free = result + llarena.round_up_for_allocation(totalsize) - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return self._setup_object(result, typeid16, False) - def obtain_free_space(self, totalsize): - # a bit of tweaking to maximize the performance and minimize the - # amount of code in an inlined version of malloc_fixedsize_clear() - if not self.try_obtain_free_space(totalsize): - raise memoryError + def obtain_free_space(self, requested_size): + if self.free == NULL: + return self._emergency_initial_block(requested_size) + while True: + executed_some_finalizers = self.markcompactcollect(requested_size) + self.next_collect_after -= requested_size + if self.next_collect_after >= 0: + break # ok + else: + if executed_some_finalizers: + pass # try again to do a collection + else: + raise MemoryError return self.free obtain_free_space._dont_inline_ = True - def try_obtain_free_space(self, needed): - needed = raw_malloc_usage(needed) - while 1: - self.markcompactcollect(needed) - missing = needed - (self.top_of_space - self.free) - if missing < 0: - return True - - def new_space_size(self, occupied, needed): - res = (occupied * FREE_SPACE_MULTIPLIER / - FREE_SPACE_DIVIDER + FREE_SPACE_ADD + needed) - # align it to 4096, which is somewhat around page size - return ((res/4096) + 1) * 4096 - - def double_space_size(self, minimal_size): - while self.space_size <= minimal_size: - self.space_size *= 2 - toaddr = 
llarena.arena_malloc(self.space_size, True) - return toaddr - - def compute_alive_objects(self): - fromaddr = self.space - addraftercollect = self.space - num = 1 - while fromaddr < self.free: - size_gc_header = self.gcheaderbuilder.size_gc_header - tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid - obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - objtotalsize = size_gc_header + objsize - if self.marked(obj): - copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or - ((tid & GCFLAG_HASHTAKEN) != 0 and - addraftercollect < fromaddr)) - addraftercollect += raw_malloc_usage(objtotalsize) - if copy_has_hash_field: - addraftercollect += llmemory.sizeof(lltype.Signed) - num += 1 - fromaddr += objtotalsize - if tid & GCFLAG_HASHFIELD: - fromaddr += llmemory.sizeof(lltype.Signed) - ll_assert(addraftercollect <= fromaddr, - "markcompactcollect() is trying to increase memory usage") - self.totalsize_of_objs = addraftercollect - self.space - return num + def _emergency_initial_block(self, requested_size): + # xxx before the GC is fully setup, we might get there. Hopefully + # we will only allocate a couple of strings, e.g. in read_from_env(). + # Just allocate them raw and leak them. + debug_start("gc-initial-block") + debug_print("leaking", requested_size, "bytes") + debug_stop("gc-initial-block") + return llmemory.raw_malloc(requested_size) def collect(self, gen=0): self.markcompactcollect() - - def markcompactcollect(self, needed=0): - start_time = self.debug_collect_start() + + def markcompactcollect(self, requested_size=0): + self.debug_collect_start(requested_size) self.debug_check_consistency() - self.to_see = self.AddressStack() - self.mark_roots_recursively() - if (self.objects_with_finalizers.non_empty() or - self.run_finalizers.non_empty()): - self.mark_objects_with_finalizers() - self._trace_and_mark() + # + # Mark alive objects + # + self.to_see = self.AddressDeque() + self.trace_from_roots() self.to_see.delete() - num_of_alive_objs = self.compute_alive_objects() - size_of_alive_objs = self.totalsize_of_objs - totalsize = self.new_space_size(size_of_alive_objs, needed + - num_of_alive_objs * BYTES_PER_TID) - tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) + - llmemory.sizeof(TID_TYPE) * num_of_alive_objs) - used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size) - if totalsize >= self.space_size or used_space_now >= self.space_size: - toaddr = self.double_space_size(totalsize) - llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - toaddr + size_of_alive_objs, - lltype.Ptr(self.TID_BACKUP)) - resizing = True - else: - toaddr = llarena.arena_new_view(self.space) - llarena.arena_reserve(self.top_of_space, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - self.top_of_space, - lltype.Ptr(self.TID_BACKUP)) - resizing = False - self.next_collect_after = totalsize - weakref_offsets = self.collect_weakref_offsets() - finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs) + # + # Prepare new views on the same memory + # + toaddr = llarena.arena_new_view(self.space) + maxnum = self.space_size - (self.free - self.space) + maxnum /= BYTES_PER_TID + llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum)) + self.tid_backup = llmemory.cast_adr_to_ptr(self.free, + lltype.Ptr(TID_BACKUP)) + # + # Walk all objects and assign forward pointers in the same order, + # also updating all references + # + self.update_forward_pointers(toaddr, 
maxnum) if (self.run_finalizers.non_empty() or self.objects_with_finalizers.non_empty()): self.update_run_finalizers() - if self.objects_with_weakrefs.non_empty(): - self.invalidate_weakrefs(weakref_offsets) + self.update_objects_with_id() - self.compact(resizing) - if not resizing: - size = toaddr + self.space_size - finaladdr - llarena.arena_reset(finaladdr, size, True) - else: - if we_are_translated(): - # because we free stuff already in raw_memmove, we - # would get double free here. Let's free it anyway - llarena.arena_free(self.space) - llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size, - True) - self.space = toaddr - self.free = finaladdr - self.top_of_space = toaddr + self.next_collect_after + self.compact() + # + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.free = self.finaladdr + self.next_collect_after = self.next_collection(self.finaladdr - toaddr, + self.num_alive_objs, + requested_size) + # + if not translated_to_c(): + remaining_size = (toaddr + self.space_size) - self.finaladdr + llarena.arena_reset(self.finaladdr, remaining_size, False) + llarena.arena_free(self.space) + self.space = toaddr + # self.debug_check_consistency() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.debug_collect_finish() + if self.next_collect_after < 0: + raise MemoryError + # if self.run_finalizers.non_empty(): self.execute_finalizers() - self.debug_collect_finish(start_time) - - def collect_weakref_offsets(self): - weakrefs = self.objects_with_weakrefs - new_weakrefs = self.AddressStack() - weakref_offsets = lltype.malloc(self.WEAKREF_OFFSETS, - weakrefs.length(), flavor='raw') - i = 0 - while weakrefs.non_empty(): - obj = weakrefs.pop() - offset = self.weakpointer_offset(self.get_type_id(obj)) - weakref_offsets[i] = offset - new_weakrefs.append(obj) - i += 1 - self.objects_with_weakrefs = new_weakrefs - weakrefs.delete() - return weakref_offsets + return True # executed some finalizers + else: + return False # no finalizer executed - def debug_collect_start(self): - if have_debug_prints(): + def debug_collect_start(self, requested_size): + if 1:# have_debug_prints(): debug_start("gc-collect") debug_print() - debug_print(".----------- Full collection ------------------") - start_time = time.time() - return start_time - return -1 - - def debug_collect_finish(self, start_time): - if start_time != -1: - end_time = time.time() - elapsed_time = end_time - start_time - self.total_collection_time += elapsed_time - self.total_collection_count += 1 - total_program_time = end_time - self.program_start_time - ct = self.total_collection_time - cc = self.total_collection_count - debug_print("| number of collections so far ", - cc) - debug_print("| total collections per second: ", - cc / total_program_time) - debug_print("| total time in markcompact-collect: ", - ct, "seconds") - debug_print("| percentage collection<->total time:", - ct * 100.0 / total_program_time, "%") + debug_print(".----------- Full collection -------------------") + debug_print("| requested size:", + requested_size) + #start_time = time.time() + #return start_time + #return -1 + + def debug_collect_finish(self): + if 1:# start_time != -1: + #end_time = time.time() + #elapsed_time = end_time - start_time + #self.total_collection_time += elapsed_time + #self.total_collection_count += 1 + #total_program_time = end_time - self.program_start_time + #ct = self.total_collection_time + #cc = self.total_collection_count + #debug_print("| number of collections so far ", + # cc) + debug_print("| total space size ", 
+ self.space_size) + debug_print("| number of objects alive ", + self.num_alive_objs) + debug_print("| used space size ", + self.free - self.space) + debug_print("| next collection after ", + self.next_collect_after) + #debug_print("| total collections per second: ", + # cc / total_program_time) + #debug_print("| total time in markcompact-collect: ", + # ct, "seconds") + #debug_print("| percentage collection<->total time:", + # ct * 100.0 / total_program_time, "%") debug_print("`----------------------------------------------") debug_stop("gc-collect") def update_run_finalizers(self): - run_finalizers = self.AddressDeque() - while self.run_finalizers.non_empty(): - obj = self.run_finalizers.popleft() - run_finalizers.append(self.get_forwarding_address(obj)) - self.run_finalizers.delete() - self.run_finalizers = run_finalizers + if self.run_finalizers.non_empty(): # uncommon case + run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + obj = self.run_finalizers.popleft() + run_finalizers.append(self.get_forwarding_address(obj)) + self.run_finalizers.delete() + self.run_finalizers = run_finalizers + # objects_with_finalizers = self.AddressDeque() while self.objects_with_finalizers.non_empty(): obj = self.objects_with_finalizers.popleft() @@ -353,90 +360,156 @@ tid = self.header(addr).tid return llop.extract_ushort(llgroup.HALFWORD, tid) - def mark_roots_recursively(self): + def trace_from_roots(self): self.root_walker.walk_roots( - MarkCompactGC._mark_root_recursively, # stack roots - MarkCompactGC._mark_root_recursively, # static in prebuilt non-gc structures - MarkCompactGC._mark_root_recursively) # static in prebuilt gc objects + MarkCompactGC._mark_root, # stack roots + MarkCompactGC._mark_root, # static in prebuilt non-gc structures + MarkCompactGC._mark_root) # static in prebuilt gc objects + if (self.objects_with_finalizers.non_empty() or + self.run_finalizers.non_empty()): + self.trace_from_objects_with_finalizers() self._trace_and_mark() def _trace_and_mark(self): - # XXX depth-first tracing... it can consume a lot of rawmalloced - # memory for very long stacks in some cases while self.to_see.non_empty(): - obj = self.to_see.pop() + obj = self.to_see.popleft() self.trace(obj, self._mark_obj, None) def _mark_obj(self, pointer, ignored): - obj = pointer.address[0] - if self.marked(obj): - return - self.mark(obj) - self.to_see.append(obj) + self.mark(pointer.address[0]) - def _mark_root_recursively(self, root): + def _mark_root(self, root): self.mark(root.address[0]) - self.to_see.append(root.address[0]) def mark(self, obj): - self.header(obj).tid |= GCFLAG_MARKBIT + if not self.marked(obj): + self.header(obj).tid |= GCFLAG_MARKBIT + self.to_see.append(obj) def marked(self, obj): - return self.header(obj).tid & GCFLAG_MARKBIT + # should work both if tid contains a CombinedSymbolic (for dying + # objects, at this point), or a plain integer. 
+ return MovingGCBase.header(self, obj).tid & GCFLAG_MARKBIT + + def toaddr_smaller_than_fromaddr(self, toaddr, fromaddr): + if translated_to_c(): + return toaddr < fromaddr + else: + # convert the addresses to integers, because they are + # theoretically not from the same arena + return toaddr - self.base_forwarding_addr < fromaddr - self.space - def update_forward_pointers(self, toaddr, num_of_alive_objs): - self.base_forwarding_addr = toaddr + def update_forward_pointers(self, toaddr, maxnum): + self.base_forwarding_addr = base_forwarding_addr = toaddr fromaddr = self.space size_gc_header = self.gcheaderbuilder.size_gc_header - i = 0 + num = 0 while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - totalsize = size_gc_header + objsize - if not self.marked(obj): - self.set_null_forwarding_address(obj, i) - else: - llarena.arena_reserve(toaddr, totalsize) - self.set_forwarding_address(obj, toaddr, i) - toaddr += totalsize - i += 1 - fromaddr += totalsize + # compute the original object size, including the + # optional hash field + basesize = size_gc_header + self.get_size(obj) + totalsrcsize = basesize + if hdr.tid & GCFLAG_HASHFIELD: # already a hash field, copy it too + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + if self.marked(obj): + # the object is marked as suriving. Compute the new object + # size + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD) == + GCFLAG_HASHTAKEN): + # grow a new hash field -- with the exception: if + # the object actually doesn't move, don't + # (otherwise, we get a bogus toaddr > fromaddr) + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + totaldstsize += llmemory.sizeof(lltype.Signed) + # + if not translated_to_c(): + llarena.arena_reserve(toaddr, basesize) + if (raw_malloc_usage(totaldstsize) > + raw_malloc_usage(basesize)): + llarena.arena_reserve(toaddr + basesize, + llmemory.sizeof(lltype.Signed)) + # + # save the field hdr.tid in the array tid_backup + ll_assert(num < maxnum, "overflow of the tid_backup table") + self.tid_backup[num] = self.get_type_id(obj) + num += 1 + # compute forward_offset, the offset to the future copy + # of this object + forward_offset = toaddr - base_forwarding_addr + # copy the first two gc flags in forward_offset + ll_assert(forward_offset & 3 == 0, "misalignment!") + forward_offset |= (hdr.tid >> first_gcflag_bit) & 3 + hdr.tid = forward_offset | GCFLAG_MARKBIT + ll_assert(self.marked(obj), "re-marking object failed!") + # done + toaddr += totaldstsize + # + fromaddr += totalsrcsize + if not translated_to_c(): + assert toaddr - base_forwarding_addr <= fromaddr - self.space + self.num_alive_objs = num + self.finaladdr = toaddr # now update references self.root_walker.walk_roots( - MarkCompactGC._update_root, # stack roots - MarkCompactGC._update_root, # static in prebuilt non-gc structures - MarkCompactGC._update_root) # static in prebuilt gc objects + MarkCompactGC._update_ref, # stack roots + MarkCompactGC._update_ref, # static in prebuilt non-gc structures + MarkCompactGC._update_ref) # static in prebuilt gc objects + self.walk_marked_objects(MarkCompactGC.trace_and_update_ref) + + def walk_marked_objects(self, callback): + num = 0 + size_gc_header = self.gcheaderbuilder.size_gc_header fromaddr = self.space - i = 0 + toaddr = self.base_forwarding_addr while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - 
objsize = self.get_size_from_backup(obj, i) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - pass + survives = self.marked(obj) + if survives: + typeid = self.get_typeid_from_backup(num) + num += 1 else: - self.trace_with_backup(obj, self._update_ref, i) - fromaddr += totalsize - i += 1 - return toaddr + typeid = self.get_type_id(obj) + baseobjsize = self._get_size_for_typeid(obj, typeid) + basesize = size_gc_header + baseobjsize + totalsrcsize = basesize + # + if survives: + grow_hash_field = False + if hdr.tid & GCFLAG_SAVED_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_SAVED_HASHTAKEN|GCFLAG_SAVED_HASHFIELD) + == GCFLAG_SAVED_HASHTAKEN): + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + grow_hash_field = True + totaldstsize += llmemory.sizeof(lltype.Signed) + callback(self, obj, typeid, basesize, toaddr, grow_hash_field) + toaddr += totaldstsize + else: + if hdr.tid & GCFLAG_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + fromaddr += totalsrcsize + walk_marked_objects._annspecialcase_ = 'specialize:arg(1)' - def trace_with_backup(self, obj, callback, arg): + def trace_and_update_ref(self, obj, typeid, _1, _2, _3): """Enumerate the locations inside the given obj that can contain GC pointers. For each such location, callback(pointer, arg) is called, where 'pointer' is an address inside the object. Typically, 'callback' is a bound method and 'arg' can be None. """ - typeid = self.get_typeid_from_backup(arg) if self.is_gcarrayofgcptr(typeid): # a performance shortcut for GcArray(gcptr) length = (obj + llmemory.gcarrayofptr_lengthoffset).signed[0] item = obj + llmemory.gcarrayofptr_itemsoffset while length > 0: - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) item += llmemory.gcarrayofptr_singleitemoffset length -= 1 return @@ -444,8 +517,7 @@ i = 0 while i < len(offsets): item = obj + offsets[i] - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) i += 1 if self.has_gcptr_in_varsize(typeid): item = obj + self.varsize_offset_to_variable_part(typeid) @@ -456,171 +528,122 @@ j = 0 while j < len(offsets): itemobj = item + offsets[j] - if self.points_to_valid_gc_object(itemobj): - callback(itemobj, arg) + self._update_ref(itemobj) j += 1 item += itemlength length -= 1 - trace_with_backup._annspecialcase_ = 'specialize:arg(2)' - - def _update_root(self, pointer): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) - - def _update_ref(self, pointer, ignore): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) + else: + weakofs = self.weakpointer_offset(typeid) + if weakofs >= 0: + self._update_weakref(obj + weakofs) + + def _update_ref(self, pointer): + if self.points_to_valid_gc_object(pointer): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + + def _update_weakref(self, pointer): + # either update the weak pointer's destination, or + # if it dies, write a NULL + if self.points_to_valid_gc_object(pointer): + if self.marked(pointer.address[0]): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + else: + pointer.address[0] = NULL def _is_external(self, obj): - return not (self.space <= obj < self.top_of_space) + return not (self.space <= obj < self.free) def get_forwarding_address(self, obj): if self._is_external(obj): return obj return 
self.get_header_forwarded_addr(obj) - def set_null_forwarding_address(self, obj, num): - self.backup_typeid(num, obj) - hdr = self.header(obj) - hdr.tid = -1 # make the object forwarded to NULL - - def set_forwarding_address(self, obj, newobjhdr, num): - self.backup_typeid(num, obj) - forward_offset = newobjhdr - self.base_forwarding_addr - hdr = self.header(obj) - hdr.tid = forward_offset # make the object forwarded to newobj - - def restore_normal_header(self, obj, num): - # Reverse of set_forwarding_address(). - typeid16 = self.get_typeid_from_backup(num) - hdr = self.header_forwarded(obj) - hdr.tid = self.combine(typeid16, 0) # restore the normal header - def get_header_forwarded_addr(self, obj): - return (self.base_forwarding_addr + - self.header_forwarded(obj).tid + - self.gcheaderbuilder.size_gc_header) + tid = self.header_forwarded(obj).tid + ll_assert(tid & GCFLAG_MARKBIT != 0, "dying object is not forwarded") + GCFLAG_MASK = ~(GCFLAG_MARKBIT | 3) + res = (self.base_forwarding_addr + (tid & GCFLAG_MASK) + + self.gcheaderbuilder.size_gc_header) + ll_assert(res < self.finaladdr, "forwarded address >= self.finaladdr") + return res def surviving(self, obj): - return self._is_external(obj) or self.header_forwarded(obj).tid != -1 - - def backup_typeid(self, num, obj): - self.tid_backup[num] = self.get_type_id(obj) + return self.marked(obj) def get_typeid_from_backup(self, num): return self.tid_backup[num] - def get_size_from_backup(self, obj, num): - typeid = self.get_typeid_from_backup(num) - size = self.fixed_size(typeid) - if self.is_varsize(typeid): - lenaddr = obj + self.varsize_offset_to_length(typeid) - length = lenaddr.signed[0] - size += length * self.varsize_item_sizes(typeid) - size = llarena.round_up_for_allocation(size) - # XXX maybe we should parametrize round_up_for_allocation() - # per GC; if we do, we also need to fix the call in - # gctypelayout.encode_type_shape() - return size + def compact(self): + self.walk_marked_objects(MarkCompactGC.copy_and_compact) - def compact(self, resizing): - fromaddr = self.space - size_gc_header = self.gcheaderbuilder.size_gc_header - start = fromaddr - end = fromaddr - num = 0 - while fromaddr < self.free: - obj = fromaddr + size_gc_header - objsize = self.get_size_from_backup(obj, num) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - # this object dies. Following line is a noop in C, - # we clear it to make debugging easier - llarena.arena_reset(fromaddr, totalsize, False) - else: - if resizing: - end = fromaddr - forward_obj = self.get_header_forwarded_addr(obj) - self.restore_normal_header(obj, num) - if obj != forward_obj: - #llop.debug_print(lltype.Void, "Copying from to", - # fromaddr, forward_ptr, totalsize) - llmemory.raw_memmove(fromaddr, - forward_obj - size_gc_header, - totalsize) - if resizing and end - start > GC_CLEARANCE: - diff = end - start - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - diff = (diff / GC_CLEARANCE) * GC_CLEARANCE - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - end = start + diff - if we_are_translated(): - # XXX wuaaaaa.... 
those objects are freed incorrectly - # here in case of test_gc - llarena.arena_reset(start, diff, True) - start += diff - num += 1 - fromaddr += totalsize + def copy_and_compact(self, obj, typeid, basesize, toaddr, grow_hash_field): + # 'basesize' is the size without any hash field + # restore the normal header + hdr = self.header_forwarded(obj) + gcflags = hdr.tid & 3 + if grow_hash_field: + gcflags |= GCFLAG_SAVED_HASHFIELD + hashvalue = self.get_identityhash_from_addr(obj) + elif gcflags & GCFLAG_SAVED_HASHFIELD: + fromaddr = llarena.getfakearenaaddress(obj) + fromaddr -= self.gcheaderbuilder.size_gc_header + hashvalue = (fromaddr + basesize).signed[0] + else: + hashvalue = 0 # not used + # + hdr.tid = self.combine(typeid, gcflags << first_gcflag_bit) + # + fromaddr = obj - self.gcheaderbuilder.size_gc_header + if translated_to_c(): + llmemory.raw_memmove(fromaddr, toaddr, basesize) + else: + llmemory.raw_memcopy(fromaddr, toaddr, basesize) + # + if gcflags & GCFLAG_SAVED_HASHFIELD: + (toaddr + basesize).signed[0] = hashvalue def debug_check_object(self, obj): - # not sure what to check here - pass - - def mark_objects_with_finalizers(self): + type_id = self.get_type_id(obj) + self.has_gcptr_in_varsize(type_id) # checks that the type_id is valid + # + tid = self.header(obj).tid + if self._is_external(obj): + # All external objects have GCFLAG_MARKBIT and GCFLAG_HASHTAKEN + # set. + assert tid & GCFLAG_MARKBIT + assert tid & GCFLAG_HASHTAKEN + else: + # Non-external objects have GCFLAG_MARKBIT that should not be set + # at the very start or at the very end of a collection -- only + # temporarily during the collection. + assert tid & GCFLAG_MARKBIT == 0 + + def trace_from_objects_with_finalizers(self): + if self.run_finalizers.non_empty(): # uncommon case + new_run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + x = self.run_finalizers.popleft() + self.mark(x) + new_run_finalizers.append(x) + self.run_finalizers.delete() + self.run_finalizers = new_run_finalizers + # + # xxx we get to run the finalizers in a random order + self._trace_and_mark() new_with_finalizers = self.AddressDeque() - run_finalizers = self.run_finalizers - new_run_finalizers = self.AddressDeque() - while run_finalizers.non_empty(): - x = run_finalizers.popleft() - self.mark(x) - self.to_see.append(x) - new_run_finalizers.append(x) - run_finalizers.delete() - self.run_finalizers = new_run_finalizers while self.objects_with_finalizers.non_empty(): x = self.objects_with_finalizers.popleft() if self.marked(x): new_with_finalizers.append(x) else: - new_run_finalizers.append(x) + self.run_finalizers.append(x) self.mark(x) - self.to_see.append(x) + self._trace_and_mark() self.objects_with_finalizers.delete() self.objects_with_finalizers = new_with_finalizers - def invalidate_weakrefs(self, weakref_offsets): - # walk over list of objects that contain weakrefs - # if the object it references survives then update the weakref - # otherwise invalidate the weakref - new_with_weakref = self.AddressStack() - i = 0 - while self.objects_with_weakrefs.non_empty(): - obj = self.objects_with_weakrefs.pop() - if not self.surviving(obj): - continue # weakref itself dies - newobj = self.get_forwarding_address(obj) - offset = weakref_offsets[i] - pointing_to = (obj + offset).address[0] - # XXX I think that pointing_to cannot be NULL here - if pointing_to: - if self.surviving(pointing_to): - (obj + offset).address[0] = self.get_forwarding_address( - pointing_to) - new_with_weakref.append(newobj) - else: - (obj + 
offset).address[0] = NULL - i += 1 - self.objects_with_weakrefs.delete() - self.objects_with_weakrefs = new_with_weakref - lltype.free(weakref_offsets, flavor='raw') - - def get_size_incl_hash(self, obj): - size = self.get_size(obj) - hdr = self.header(obj) - if hdr.tid & GCFLAG_HASHFIELD: - size += llmemory.sizeof(lltype.Signed) - return size - def identityhash(self, gcobj): # Unlike SemiSpaceGC.identityhash(), this function does not have # to care about reducing top_of_space. The reason is as @@ -635,8 +658,23 @@ hdr = self.header(obj) # if hdr.tid & GCFLAG_HASHFIELD: # the hash is in a field at the end - obj += self.get_size(obj) + obj = llarena.getfakearenaaddress(obj) + self.get_size(obj) return obj.signed[0] # hdr.tid |= GCFLAG_HASHTAKEN - return llmemory.cast_adr_to_int(obj) # direct case + return self.get_identityhash_from_addr(obj) + + def get_identityhash_from_addr(self, obj): + if translated_to_c(): + return llmemory.cast_adr_to_int(obj) # direct case + else: + try: + adr = llarena.getfakearenaaddress(obj) # -> arena address + except RuntimeError: + return llmemory.cast_adr_to_int(obj) # not in an arena... + return adr - self.space + +# ____________________________________________________________ + +class CannotAllocateGCArena(Exception): + pass Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gc/test/test_direct.py Tue Sep 7 19:22:31 2010 @@ -67,7 +67,10 @@ from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True).translation self.stackroots = [] - self.gc = self.GCClass(config, **self.GC_PARAMS) + GC_PARAMS = self.GC_PARAMS.copy() + if hasattr(meth, 'GC_PARAMS'): + GC_PARAMS.update(meth.GC_PARAMS) + self.gc = self.GCClass(config, **GC_PARAMS) self.gc.DEBUG = True self.rootwalker = DirectRootWalker(self) self.gc.set_root_walker(self.rootwalker) @@ -96,7 +99,7 @@ p[index] = newvalue def malloc(self, TYPE, n=None): - addr = self.gc.malloc(self.get_type_id(TYPE), n) + addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) def test_simple(self): @@ -311,7 +314,18 @@ print hash assert isinstance(hash, (int, long)) assert hash == self.gc.identityhash(p_const) - + # (5) p is actually moving (for the markcompact gc) + p0 = self.malloc(S) + self.stackroots.append(p0) + p = self.malloc(S) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.stackroots.pop(-2) + self.gc.collect() # p0 goes away, p shifts left + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -431,3 +445,14 @@ class TestMarkCompactGC(DirectGCTest): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + def test_many_objects(self): + DirectGCTest.test_many_objects(self) + test_many_objects.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + def test_varsized_from_stack(self): + DirectGCTest.test_varsized_from_stack(self) + test_varsized_from_stack.GC_PARAMS = {'space_size': 2 * 1024 * WORD} + + def test_varsized_from_prebuilt_gc(self): + DirectGCTest.test_varsized_from_prebuilt_gc(self) + test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 
* WORD} Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py Tue Sep 7 19:22:31 2010 @@ -44,16 +44,18 @@ self.type_info_group_ptr = type_info_group._as_ptr() def get(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info(res) + return res def get_varsize(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info_varsize(res) + return res def q_is_varsize(self, typeid): infobits = self.get(typeid).infobits @@ -115,13 +117,24 @@ # the lowest 16bits are used to store group member index -T_MEMBER_INDEX = 0xffff +T_MEMBER_INDEX = 0xffff T_IS_VARSIZE = 0x10000 T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_KEY_MASK = 0xFF000000 +T_KEY_VALUE = 0x7A000000 # bug detection only -def _check_typeid(typeid): +def _check_valid_type_info(p): + ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id") + +def _check_valid_type_info_varsize(p): + ll_assert(p.header.infobits & (T_KEY_MASK | T_IS_VARSIZE) == + (T_KEY_VALUE | T_IS_VARSIZE), + "invalid varsize type_id") + +def check_typeid(typeid): + # xxx does not perform a full check of validity, just checks for nonzero ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid), "invalid type_id") @@ -165,9 +178,9 @@ infobits |= T_HAS_GCPTR_IN_VARSIZE varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF) varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) - if TYPE == WEAKREF: + if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF - info.infobits = infobits + info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -250,17 +263,21 @@ _, TYPE = TYPE._first_struct() def get_info(self, type_id): - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info(res) + return res def get_info_varsize(self, type_id): - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info_varsize(res) + return res - def is_weakref(self, type_id): - return self.get_info(type_id).infobits & T_IS_WEAKREF + def is_weakref_type(self, TYPE): + return TYPE == WEAKREF def encode_type_shapes_now(self): if not self.can_encode_type_shape: Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gcwrapper.py Tue Sep 7 19:22:31 2010 @@ -119,6 +119,9 @@ else: return True + def pyobjectptr(self, klass): + raise NotImplementedError(klass) + # ____________________________________________________________ class LLInterpRootWalker: Modified: 
pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gc.py Tue Sep 7 19:22:31 2010 @@ -639,12 +639,14 @@ class TestMarkCompactGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = {'space_size': 65536+16384} + GC_CAN_SHRINK_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - -class TestMarkCompactGCGrowing(TestMarkCompactGC): - GC_PARAMS = {'space_size': 16*WORD} + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass Modified: pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gctypelayout.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gctypelayout.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/test/test_gctypelayout.py Tue Sep 7 19:22:31 2010 @@ -101,7 +101,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) Modified: pypy/branch/jit-bounds/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/test/test_transformed_gc.py Tue Sep 7 19:22:31 2010 @@ -1138,15 +1138,16 @@ class TestMarkCompactGC(GenericMovingGCTests): gcname = 'markcompact' - def setup_class(cls): - py.test.skip("Disabled for now, sorry") - class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass - GC_PARAMS = {'space_size': 512*WORD} + GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") + class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1536,3 +1537,12 @@ GC_PARAMS = {'space_size': 512*WORD, 'nursery_size': 32*WORD} root_stack_depth = 200 + +class TestMarkCompactTaggedpointerGC(TaggedPointerGCTests): + gcname = 'markcompact' + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = {'space_size': 4096*WORD} + root_stack_depth = 200 Modified: pypy/branch/jit-bounds/pypy/rpython/module/ll_time.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/module/ll_time.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/module/ll_time.py Tue Sep 7 19:22:31 2010 @@ -9,6 +9,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.extfunc import BaseLazyRegistering, registering, extdef from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.translator.tool.cbuild import ExternalCompilationInfo if 
sys.platform == 'win32': @@ -119,7 +120,8 @@ if self.HAVE_FTIME: t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) - result = float(int(t.c_time)) + float(int(t.c_millitm)) * 0.001 + result = (float(intmask(t.c_time)) + + float(intmask(t.c_millitm)) * 0.001) lltype.free(t, flavor='raw') return result return float(c_time(void)) Modified: pypy/branch/jit-bounds/pypy/rpython/ootypesystem/ootype.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/ootypesystem/ootype.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/ootypesystem/ootype.py Tue Sep 7 19:22:31 2010 @@ -267,6 +267,14 @@ return self._fields_with_default[:] return self._superclass._get_fields_with_default() + self._fields_with_default + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) class SpecializableType(OOType): Modified: pypy/branch/jit-bounds/pypy/rpython/ootypesystem/rclass.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/ootypesystem/rclass.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/ootypesystem/rclass.py Tue Sep 7 19:22:31 2010 @@ -194,6 +194,7 @@ self.lowleveltype._hints.update(hints) if self.classdef is None: + self.fields = {} self.allfields = {} self.allmethods = {} self.allclassattributes = {} @@ -210,6 +211,7 @@ allclassattributes = {} fields = {} + nonmangledfields = [] fielddefaults = {} if llfields: @@ -224,6 +226,7 @@ allfields[mangled] = repr oot = repr.lowleveltype fields[mangled] = oot + nonmangledfields.append(name) try: value = self.classdef.classdesc.read_attribute(name) fielddefaults[mangled] = repr.convert_desc_or_const(value) @@ -294,6 +297,7 @@ if not attrdef.s_value.is_constant(): classattributes[mangled] = attrdef.s_value, value + self.fields = nonmangledfields self.allfields = allfields self.allmethods = allmethods self.allclassattributes = allclassattributes Modified: pypy/branch/jit-bounds/pypy/rpython/rbuiltin.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/rbuiltin.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/rbuiltin.py Tue Sep 7 19:22:31 2010 @@ -542,16 +542,25 @@ return hop.genop('raw_malloc_usage', [v_size], resulttype=lltype.Signed) def rtype_raw_free(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_free(x) where x is the constant NULL") v_addr, = hop.inputargs(llmemory.Address) hop.exception_cannot_occur() return hop.genop('raw_free', [v_addr]) def rtype_raw_memcopy(hop): + for s_addr in hop.args_s[:2]: + if s_addr.is_null_address(): + raise TyperError("raw_memcopy() with a constant NULL") v_list = hop.inputargs(llmemory.Address, llmemory.Address, lltype.Signed) hop.exception_cannot_occur() return hop.genop('raw_memcopy', v_list) def rtype_raw_memclear(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_memclear(x, n) where x is the constant NULL") v_list = hop.inputargs(llmemory.Address, lltype.Signed) return hop.genop('raw_memclear', v_list) Modified: pypy/branch/jit-bounds/pypy/rpython/rclass.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/rclass.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/rclass.py Tue Sep 7 19:22:31 2010 
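
The rclass.py hunks below make the immutability hints stricter: FieldListAccessor.initialize() now requires a dict (mapping field name to a suffix such as '' or '[*]'), and a new ImmutableConflictError is raised when a subclass declares immutable, either via _immutable_=True or via _immutable_fields_, a field that a parent class defines as mutable; the added tests spell out the forbidden and allowed combinations. The following is a deliberately simplified pure-Python illustration of that rule, not the rtyper's actual mechanics: in RPython the annotator decides on which class each field lives, whereas here the mutable fields of every class are listed by hand.

    class ImmutableConflictError(Exception):
        """Hint conflict across a class hierarchy (sketch of the real check)."""

    def check_immutable_hints(cls, mutable_fields_by_class):
        # mutable_fields_by_class maps each class to the set of fields that
        # would live on its instances; in RPython this comes from annotation.
        declared = set(cls.__dict__.get('_immutable_fields_', []))
        is_immutable = bool(getattr(cls, '_immutable_', False))
        for base in cls.__mro__[1:]:
            for field in mutable_fields_by_class.get(base, ()):
                if is_immutable:
                    raise ImmutableConflictError(
                        "%s has _immutable_=True, but %s defines the mutable"
                        " field %r" % (cls.__name__, base.__name__, field))
                if field in declared:
                    raise ImmutableConflictError(
                        "field %r is mutable in %s but listed in"
                        " _immutable_fields_ of %s"
                        % (field, base.__name__, cls.__name__))

    class A(object):
        pass

    class B(A):
        _immutable_fields_ = ['v']

    try:
        # mirrors test_immutable_forbidden_inheritance_1: 'v' is a mutable
        # field of A, so the subclass B may not list it as immutable
        check_immutable_hints(B, {A: set(['v'])})
    except ImmutableConflictError:
        pass
    else:
        assert False, "expected a conflict"

The real check, _check_for_immutable_conflicts() in the hunk below, additionally skips Void fields and walks the rbase chain of instance representations instead of the Python MRO.
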
@@ -9,6 +9,7 @@ class FieldListAccessor(object): def initialize(self, TYPE, fields): + assert type(fields) is dict self.TYPE = TYPE self.fields = fields @@ -18,6 +19,10 @@ def _freeze_(self): return True +class ImmutableConflictError(Exception): + """Raised when the _immutable_ or _immutable_fields_ hints are + not consistent across a class hierarchy.""" + def getclassrepr(rtyper, classdef): try: @@ -153,7 +158,7 @@ pass def _check_for_immutable_hints(self, hints): - if '_immutable_' in self.classdef.classdesc.classdict: + if self.classdef.classdesc.lookup('_immutable_') is not None: hints = hints.copy() hints['immutable'] = True self.immutable_field_list = [] # unless overwritten below @@ -182,16 +187,20 @@ return 'InstanceR %s' % (clsname,) def _setup_repr_final(self): + self._setup_immutable_field_list() + self._check_for_immutable_conflicts() + + def _setup_immutable_field_list(self): hints = self.object_type._hints if "immutable_fields" in hints: accessor = hints["immutable_fields"] - immutable_fields = {} - rbase = self - while rbase.classdef is not None: - immutable_fields.update( - dict.fromkeys(rbase.immutable_field_list)) - rbase = rbase.rbase - self._parse_field_list(immutable_fields, accessor) + if not hasattr(accessor, 'fields'): + immutable_fields = [] + rbase = self + while rbase.classdef is not None: + immutable_fields += rbase.immutable_field_list + rbase = rbase.rbase + self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} @@ -209,6 +218,36 @@ accessor.initialize(self.object_type, with_suffix) return with_suffix + def _check_for_immutable_conflicts(self): + # check for conflicts, i.e. a field that is defined normally as + # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void + is_self_immutable = "immutable" in self.object_type._hints + base = self + while base.classdef is not None: + base = base.rbase + for fieldname in base.fields: + try: + mangled, r = base._get_field(fieldname) + except KeyError: + continue + if r.lowleveltype == Void: + continue + base._setup_immutable_field_list() + if base.object_type._immutable_field(mangled): + continue + # 'fieldname' is a mutable, non-Void field in the parent + if is_self_immutable: + raise ImmutableConflictError( + "class %r has _immutable_=True, but parent class %r " + "defines (at least) the mutable field %r" % ( + self, base, fieldname)) + if fieldname in self.immutable_field_list: + raise ImmutableConflictError( + "field %r is defined mutable in class %r, but " + "listed in _immutable_fields_ in subclass %r" % ( + fieldname, base, self)) + def new_instance(self, llops, classcallhop=None): raise NotImplementedError Modified: pypy/branch/jit-bounds/pypy/rpython/test/test_rclass.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/test/test_rclass.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/test/test_rclass.py Tue Sep 7 19:22:31 2010 @@ -796,27 +796,92 @@ assert accessor.fields == {"inst_y" : ""} or \ accessor.fields == {"oy" : ""} # for ootype - def test_immutable_inheritance(self): - class I(object): - def __init__(self, v): - self.v = v - - class J(I): + def test_immutable_forbidden_inheritance_1(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_fields_ = ['v'] + def f(): + A().v = 123 + B() # crash: class B says 'v' is immutable, + # but it is defined on 
parent class A + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_forbidden_inheritance_2(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B() # crash: class B has _immutable_ = True + # but class A defines 'v' to be mutable + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_ok_inheritance_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['v'] + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B().w = 456 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + try: + A_TYPE = B_TYPE.super + except AttributeError: + A_TYPE = B_TYPE._superclass # for ootype + accessor = A_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype + + def test_immutable_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_ = True + class B(A): + pass + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] # inherited from A + + def test_immutable_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): _immutable_ = True - def __init__(self, v, w): - self.w = w - I.__init__(self, v) - - j = J(3, 4) - def f(): - j.v = j.v * 1 # make the annotator think it is mutated - j.w = j.w * 1 # make the annotator think it is mutated - return j.v + j.w - - t, typer, graph = self.gengraph(f, [], backendopt=True) - f_summary = summary(graph) - assert f_summary == {"setfield": 2} or \ - f_summary == {"oosetfield": 2} # for ootype + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + + def test_immutable_subclass_void(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def myfunc(): + pass + def f(): + A().f = myfunc # it's ok to add Void attributes to A + B().v = 123 # even though only B is declared _immutable_ + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] class TestLLtype(BaseTestRclass, LLRtypeMixin): Modified: pypy/branch/jit-bounds/pypy/translator/backendopt/test/test_constfold.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/backendopt/test/test_constfold.py (original) +++ pypy/branch/jit-bounds/pypy/translator/backendopt/test/test_constfold.py Tue Sep 7 19:22:31 2010 @@ -49,7 +49,7 @@ accessor = rclass.FieldListAccessor() S2 = lltype.GcStruct('S2', ('x', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S2, ['x']) + accessor.initialize(S2, {'x': ''}) test_simple(S2) Modified: pypy/branch/jit-bounds/pypy/translator/c/database.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/database.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/database.py Tue Sep 7 19:22:31 2010 @@ -213,7 +213,7 @@ forcename = self.idelayedfunctionnames[obj][0] node = 
self.getcontainernode(container, forcename=forcename) - assert node.ptrname == forcename + assert node.getptrname() == forcename return forcename # /hack hack hack @@ -222,7 +222,7 @@ return '((%s) %d)' % (cdecl(self.gettype(T), ''), obj._obj) node = self.getcontainernode(container) - return node.ptrname + return node.getptrname() else: return '((%s) NULL)' % (cdecl(self.gettype(T), ''), ) else: Modified: pypy/branch/jit-bounds/pypy/translator/c/gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gc.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gc.py Tue Sep 7 19:22:31 2010 @@ -172,7 +172,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = 'void (@)(void *)' self.name = defnode.gcinfo.static_deallocator - self.ptrname = '((void (*)(void *)) %s)' % (self.name,) + + def getptrname(self): + return '((void (*)(void *)) %s)' % (self.name,) def enum_dependencies(self): return [] @@ -266,7 +268,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = self.typename self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename) - self.ptrname = '(&%s)' % (self.name,) + + def getptrname(self): + return '(&%s)' % (self.name,) def enum_dependencies(self): return [] Modified: pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py Tue Sep 7 19:22:31 2010 @@ -375,7 +375,7 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'prefetch', + 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', Modified: pypy/branch/jit-bounds/pypy/translator/c/node.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/node.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/node.py Tue Sep 7 19:22:31 2010 @@ -77,6 +77,8 @@ if db.gcpolicy.need_no_typeptr(): assert self.fieldnames == ('typeptr',) self.fieldnames = () + # + self.fulltypename = '%s %s @' % (self.typetag, self.name) def setup(self): # this computes self.fields @@ -119,7 +121,7 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def c_struct_field_name(self, name): # occasionally overridden in __init__(): @@ -211,6 +213,8 @@ self.name) = db.namespace.uniquename(basename, with_number=with_number, bare=True) self.dependencies = {} + self.fulltypename = '%s %s @' % (self.typetag, self.name) + self.fullptrtypename = '%s %s *@' % (self.typetag, self.name) def setup(self): if hasattr(self, 'itemtypename'): @@ -236,10 +240,10 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def getptrtype(self): - return '%s %s *@' % (self.typetag, self.name) + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s.items[%s]' % (baseexpr, index) @@ -336,16 +340,19 @@ if ARRAY._hints.get("render_as_void"): contained_type = Void self.itemtypename = db.gettype(contained_type, who_asks=self) + self.fulltypename = 
self.itemtypename.replace('@', '(@)[%d]' % + (self.varlength,)) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Array loops are forbidden by ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - return self.itemtypename.replace('@', '(@)[%d]' % (self.varlength,)) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s[%d]' % (baseexpr, index) @@ -383,17 +390,19 @@ self.LLTYPE = FIXEDARRAY self.dependencies = {} self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self) + self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' % + FIXEDARRAY.length) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Loops are forbidden by ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - FIXEDARRAY = self.FIXEDARRAY - return self.itemtypename.replace('@', '(@)[%d]' % FIXEDARRAY.length) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index, dummy=False): if not isinstance(index, int): @@ -469,7 +478,7 @@ if USESLOTS: # keep the number of slots down! __slots__ = """db obj typename implementationtypename - name ptrname + name globalcontainer""".split() eci_name = '_compilation_info' @@ -494,7 +503,9 @@ if self.typename != self.implementationtypename: if db.gettypedefnode(T).extra_union_for_varlength: self.name += '.b' - self.ptrname = '(&%s)' % self.name + + def getptrname(self): + return '(&%s)' % self.name def getTYPE(self): return typeOf(self.obj) @@ -667,10 +678,10 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if barebonearray(T): - self.ptrname = self.name + def getptrname(self): + if barebonearray(self.getTYPE()): + return self.name + return ContainerNode.getptrname(self) def basename(self): return 'array' @@ -728,10 +739,10 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if not isinstance(obj, _subarray): # XXX hackish - self.ptrname = self.name + def getptrname(self): + if not isinstance(self.obj, _subarray): # XXX hackish + return self.name + return ContainerNode.getptrname(self) def basename(self): T = self.getTYPE() @@ -812,7 +823,9 @@ self.make_funcgens() #self.dependencies = {} self.typename = db.gettype(T) #, who_asks=self) - self.ptrname = self.name + + def getptrname(self): + return self.name def make_funcgens(self): self.funcgens = select_function_code_generators(self.obj, self.db, self.name) @@ -958,7 +971,7 @@ def startupcode(self): T = self.getTYPE() - args = [self.ptrname] + args = [self.getptrname()] # XXX how to make this code more generic? if T.tag == 'ThreadLock': lock = self.obj.externalobj @@ -990,13 +1003,15 @@ self.obj = obj value = obj.value self.name = self._python_c_name(value) - self.ptrname = self.name self.exported_name = self.name # a list of expressions giving places where this constant PyObject # must be copied. 
Normally just in the global variable of the same # name, but see also StructNode.initializationexpr() :-( self.where_to_copy_me = [] + def getptrname(self): + return self.name + def _python_c_name(self, value): # just some minimal cases: None and builtin exceptions if value is None: Modified: pypy/branch/jit-bounds/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/test/test_newgc.py Tue Sep 7 19:22:31 2010 @@ -67,9 +67,8 @@ if not fullname.startswith('define'): continue keyword = conftest.option.keyword - if keyword: - if keyword.startswith('test_'): - keyword = keyword[len('test_'):] + if keyword.startswith('test_'): + keyword = keyword[len('test_'):] if keyword not in fullname: continue prefix, name = fullname.split('_', 1) @@ -1072,21 +1071,66 @@ should_be_moving = True GC_CAN_SHRINK_ARRAY = False - def setup_class(cls): - py.test.skip("Disabled for now") - def test_gc_set_max_heap_size(self): py.test.skip("not implemented") + def test_gc_heap_stats(self): + py.test.skip("not implemented") + def test_finalizer_order(self): py.test.skip("not implemented") + def define_adding_a_hash(cls): + from pypy.rlib.objectmodel import compute_identity_hash + S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) + S2 = lltype.GcStruct('S2', ('p1', lltype.Ptr(S1)), + ('p2', lltype.Ptr(S1)), + ('p3', lltype.Ptr(S1)), + ('p4', lltype.Ptr(S1)), + ('p5', lltype.Ptr(S1)), + ('p6', lltype.Ptr(S1)), + ('p7', lltype.Ptr(S1)), + ('p8', lltype.Ptr(S1)), + ('p9', lltype.Ptr(S1))) + def g(): + lltype.malloc(S1) # forgotten, will be shifted over + s2 = lltype.malloc(S2) # a big object, overlaps its old position + s2.p1 = lltype.malloc(S1); s2.p1.x = 1010 + s2.p2 = lltype.malloc(S1); s2.p2.x = 1020 + s2.p3 = lltype.malloc(S1); s2.p3.x = 1030 + s2.p4 = lltype.malloc(S1); s2.p4.x = 1040 + s2.p5 = lltype.malloc(S1); s2.p5.x = 1050 + s2.p6 = lltype.malloc(S1); s2.p6.x = 1060 + s2.p7 = lltype.malloc(S1); s2.p7.x = 1070 + s2.p8 = lltype.malloc(S1); s2.p8.x = 1080 + s2.p9 = lltype.malloc(S1); s2.p9.x = 1090 + return s2 + def f(): + rgc.collect() + s2 = g() + h2 = compute_identity_hash(s2) + rgc.collect() # shift s2 to the left, but add a hash field + assert s2.p1.x == 1010 + assert s2.p2.x == 1020 + assert s2.p3.x == 1030 + assert s2.p4.x == 1040 + assert s2.p5.x == 1050 + assert s2.p6.x == 1060 + assert s2.p7.x == 1070 + assert s2.p8.x == 1080 + assert s2.p9.x == 1090 + return h2 - compute_identity_hash(s2) + return f + + def test_adding_a_hash(self): + res = self.run("adding_a_hash") + assert res == 0 + # ____________________________________________________________________ -class TestHybridTaggedPointers(TestHybridGC): +class TaggedPointersTest(object): taggedpointers = True - def define_tagged(cls): class Unrelated(object): pass @@ -1129,3 +1173,10 @@ __slots__ = 'smallint' def meth(self, x): return self.smallint + x + 3 + + +class TestHybridTaggedPointers(TaggedPointersTest, TestHybridGC): + pass + +class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): + removetypeptr = True Modified: pypy/branch/jit-bounds/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/exceptiontransform.py (original) +++ pypy/branch/jit-bounds/pypy/translator/exceptiontransform.py Tue Sep 7 19:22:31 2010 @@ -197,7 +197,7 @@ for graph in 
self.translator.graphs: self.create_exception_handling(graph) - def create_exception_handling(self, graph, always_exc_clear=False): + def create_exception_handling(self, graph): """After an exception in a direct_call (or indirect_call), that is not caught by an explicit except statement, we need to reraise the exception. So after this @@ -212,7 +212,6 @@ self.raise_analyzer.analyze_direct_call(graph) graph.exceptiontransformed = self.exc_data_ptr - self.always_exc_clear = always_exc_clear join_blocks(graph) # collect the blocks before changing them n_need_exc_matching_blocks = 0 @@ -455,13 +454,18 @@ block.recloseblock(l0, l) insert_zeroing_op = False - # XXX this is not right. it also inserts zero_gc_pointers_inside - # XXX on a path that malloc_nonmovable returns null, but does not raise - # XXX which might end up with a segfault. But we don't have such gc now - if spaceop.opname == 'malloc' or spaceop.opname == 'malloc_nonmovable': + if spaceop.opname == 'malloc': flavor = spaceop.args[1].value['flavor'] if flavor == 'gc': insert_zeroing_op = True + elif spaceop.opname == 'malloc_nonmovable': + # xxx we cannot insert zero_gc_pointers_inside after + # malloc_nonmovable, because it can return null. For now + # we simply always force the zero=True flag on + # malloc_nonmovable. + c_flags = spaceop.args[1] + c_flags.value = c_flags.value.copy() + spaceop.args[1].value['zero'] = True if insert_zeroing_op: if normalafterblock is None: @@ -479,16 +483,6 @@ [v_result_after], varoftype(lltype.Void))) - if self.always_exc_clear: - # insert code that clears the exception even in the non-exceptional - # case... this is a hint for the JIT, but pointless otherwise - if normalafterblock is None: - normalafterblock = insert_empty_block(None, l0) - llops = rtyper.LowLevelOpList(None) - self.gen_setfield('exc_value', self.c_null_evalue, llops) - self.gen_setfield('exc_type', self.c_null_etype, llops) - normalafterblock.operations[:0] = llops - class LLTypeExceptionTransformer(BaseExceptionTransformer): From fijal at codespeak.net Wed Sep 8 08:50:18 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 08:50:18 +0200 (CEST) Subject: [pypy-svn] r76928 - pypy/trunk/pypy/rlib Message-ID: <20100908065018.70FD036C227@codespeak.net> Author: fijal Date: Wed Sep 8 08:50:16 2010 New Revision: 76928 Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py Log: on posix, define sockaddr_ll Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/trunk/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/trunk/pypy/rlib/_rsocket_rffi.py Wed Sep 8 08:50:16 2010 @@ -32,6 +32,7 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', + 'netpacket/packet.h' ) cond_includes = [('AF_NETLINK', 'linux/netlink.h')] libraries = () @@ -273,7 +274,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), @@ -309,6 +310,15 @@ [('fd', socketfd_type), ('events', rffi.SHORT), ('revents', rffi.SHORT)]) + + CConfig.sockaddr_ll = platform.Struct('struct sockaddr_ll', + [('sll_protocol', rffi.INT), + ('sll_pkttype', rffi.INT), + ('sll_hatype', rffi.INT), + ('sll_addr', CCHARP), + ('sll_halen', rffi.INT)], + ) + if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( From arigo at codespeak.net Wed Sep 8 09:54:01 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 
8 Sep 2010 09:54:01 +0200 (CEST) Subject: [pypy-svn] r76929 - pypy/trunk/pypy/rpython/memory Message-ID: <20100908075401.712C1282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 09:53:57 2010 New Revision: 76929 Modified: pypy/trunk/pypy/rpython/memory/gctypelayout.py Log: Fix for r76920. Oups, breaks tests a bit all over the place. Modified: pypy/trunk/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/trunk/pypy/rpython/memory/gctypelayout.py Wed Sep 8 09:53:57 2010 @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import ll_assert +from pypy.rlib.rarithmetic import intmask from pypy.tool.identity_dict import identity_dict @@ -122,8 +123,8 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 -T_KEY_MASK = 0xFF000000 -T_KEY_VALUE = 0x7A000000 # bug detection only +T_KEY_MASK = intmask(0xFF000000) +T_KEY_VALUE = intmask(0x7A000000) # bug detection only def _check_valid_type_info(p): ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id") From arigo at codespeak.net Wed Sep 8 09:57:28 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 09:57:28 +0200 (CEST) Subject: [pypy-svn] r76930 - pypy/trunk/pypy/rpython/test Message-ID: <20100908075728.4B924282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 09:57:26 2010 New Revision: 76930 Modified: pypy/trunk/pypy/rpython/test/test_rint.py Log: More fixes for r76920. Modified: pypy/trunk/pypy/rpython/test/test_rint.py ============================================================================== --- pypy/trunk/pypy/rpython/test/test_rint.py (original) +++ pypy/trunk/pypy/rpython/test/test_rint.py Wed Sep 8 09:57:26 2010 @@ -117,10 +117,10 @@ assert self.ll_to_string(res) == '413974738222117' def test_unsigned(self): - bigvalue = sys.maxint + 17 + bigvalue = r_uint(sys.maxint + 17) def dummy(i): i = r_uint(i) - j = r_uint(bigvalue) + j = bigvalue return i < j res = self.interpret(dummy,[0]) From arigo at codespeak.net Wed Sep 8 10:06:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 10:06:18 +0200 (CEST) Subject: [pypy-svn] r76931 - in pypy/trunk/pypy: config jit/backend/llsupport jit/backend/llsupport/test jit/backend/x86 jit/backend/x86/test module/pypyjit/test rpython/memory/gctransform translator/c/gcc translator/c/gcc/test translator/c/gcc/test/elf64 translator/platform Message-ID: <20100908080618.60512282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 10:06:15 2010 New Revision: 76931 Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_32bit_reg_zeroextend.s pypy/trunk/pypy/translator/c/gcc/test/elf64/track_basic_argument_registers.s pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.c (contents, props changed) pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.s pypy/trunk/pypy/translator/c/gcc/test/elf64/track_negative_rsp_offset.s pypy/trunk/pypy/translator/c/gcc/test/elf64/track_varargs_function.s Modified: pypy/trunk/pypy/config/translationoption.py pypy/trunk/pypy/jit/backend/llsupport/gc.py pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/regalloc.py pypy/trunk/pypy/jit/backend/x86/regloc.py pypy/trunk/pypy/jit/backend/x86/rx86.py 
pypy/trunk/pypy/jit/backend/x86/test/test_gc_integration.py pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py pypy/trunk/pypy/rpython/memory/gctransform/asmgcroot.py pypy/trunk/pypy/translator/c/gcc/instruction.py pypy/trunk/pypy/translator/c/gcc/test/conftest.py pypy/trunk/pypy/translator/c/gcc/test/test_trackgcroot.py pypy/trunk/pypy/translator/c/gcc/trackgcroot.py pypy/trunk/pypy/translator/platform/linux.py pypy/trunk/pypy/translator/platform/posix.py Log: Merge branch/asmgcc-64. I *think* that after a couple of minor changes it does not break asmgcc on 32 bits, and it seems to work on 64 bits too. Modified: pypy/trunk/pypy/config/translationoption.py ============================================================================== --- pypy/trunk/pypy/config/translationoption.py (original) +++ pypy/trunk/pypy/config/translationoption.py Wed Sep 8 10:06:15 2010 @@ -343,11 +343,7 @@ } def final_check_config(config): - # For now, 64-bit JIT requires boehm. You have to say it explicitly - # with --gc=boehm, so that you don't get boehm by mistake. - if IS_64_BITS: - if config.translation.jit and config.translation.gc != 'boehm': - raise ConfigError("for now, 64-bit JIT requires --gc=boehm") + pass def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Wed Sep 8 10:06:15 2010 @@ -251,13 +251,25 @@ if oldgcmap: lltype.free(oldgcmap, flavor='raw') - def get_basic_shape(self): - return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) - chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) - chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) - chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) - chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) - chr(0)] + def get_basic_shape(self, is_64_bit=False): + # XXX: Should this code even really know about stack frame layout of + # the JIT? + if is_64_bit: + return [chr(self.LOC_EBP_PLUS | 8), + chr(self.LOC_EBP_MINUS | 8), + chr(self.LOC_EBP_MINUS | 16), + chr(self.LOC_EBP_MINUS | 24), + chr(self.LOC_EBP_MINUS | 32), + chr(self.LOC_EBP_MINUS | 40), + chr(self.LOC_EBP_PLUS | 0), + chr(0)] + else: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) + chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) + chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) + chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) + chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) + chr(0)] def _encode_num(self, shape, number): assert number >= 0 @@ -276,17 +288,9 @@ num = self.LOC_EBP_MINUS | (-offset) self._encode_num(shape, num) - def add_ebx(self, shape): - shape.append(chr(self.LOC_REG | 4)) - - def add_esi(self, shape): - shape.append(chr(self.LOC_REG | 8)) - - def add_edi(self, shape): - shape.append(chr(self.LOC_REG | 12)) - - def add_ebp(self, shape): - shape.append(chr(self.LOC_REG | 16)) + def add_callee_save_reg(self, shape, reg_index): + assert reg_index > 0 + shape.append(chr(self.LOC_REG | (reg_index << 2))) def compress_callshape(self, shape): # Similar to compress_callshape() in trackgcroot.py. 
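Each entry of the "shape" built above is a single small integer that packs a location kind into its two low bits and either a callee-saved-register index or a stack offset into the remaining bits. A simplified decoder for the 32-bit layout, written against the numeric values LOC_REG=0, LOC_ESP_PLUS=1, LOC_EBP_PLUS=2, LOC_EBP_MINUS=3 used elsewhere in this tree (a sketch for illustration only; the real decoding lives in format_location() in trackgcroot.py and in asmgcroot.py):

    LOC_REG, LOC_ESP_PLUS, LOC_EBP_PLUS, LOC_EBP_MINUS = 0, 1, 2, 3
    LOC_MASK = 0x03
    CALLEE_SAVE_REGISTERS_32 = ['%ebx', '%esi', '%edi', '%ebp']

    def describe_location(loc):
        kind = loc & LOC_MASK                 # low two bits: what kind of location
        if kind == LOC_REG:
            if loc == 0:                      # LOC_NOWHERE: the value is not stored anywhere
                return '?'
            return CALLEE_SAVE_REGISTERS_32[(loc >> 2) - 1]
        offset = loc & ~LOC_MASK              # offsets are multiples of 4 on 32-bit
        if kind == LOC_EBP_MINUS:
            offset = -offset
        base = '%esp' if kind == LOC_ESP_PLUS else '%ebp'
        return '%d(%s)' % (offset, base)

    assert describe_location(LOC_REG | (1 << 2)) == '%ebx'
    assert describe_location(LOC_EBP_PLUS + 4) == '4(%ebp)'
    assert describe_location(LOC_EBP_MINUS + 4) == '-4(%ebp)'
    assert describe_location(LOC_ESP_PLUS + 8) == '8(%esp)'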
Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Wed Sep 8 10:06:15 2010 @@ -73,16 +73,16 @@ gcrootmap.add_ebp_offset(shape, num1) gcrootmap.add_ebp_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) - gcrootmap.add_ebx(shape) + gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4]) - gcrootmap.add_esi(shape) + gcrootmap.add_callee_save_reg(shape, 2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8]) - gcrootmap.add_edi(shape) + gcrootmap.add_callee_save_reg(shape, 3) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12]) - gcrootmap.add_ebp(shape) + gcrootmap.add_callee_save_reg(shape, 4) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12, 16]) # Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Wed Sep 8 10:06:15 2010 @@ -273,7 +273,8 @@ if IS_X86_32: self.mc.MOV_sr(WORD, edx.value) # save it as the new argument elif IS_X86_64: - # FIXME: We can't just clobber rdi like this, can we? + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. self.mc.MOV_rr(edi.value, edx.value) addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() @@ -1256,8 +1257,12 @@ sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) type_info_group = llop.gc_get_type_info_group(llmemory.Address) type_info_group = rffi.cast(lltype.Signed, type_info_group) - expected_typeid = (classptr - sizeof_ti - type_info_group) >> 2 - self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + expected_typeid = classptr - sizeof_ti - type_info_group + if IS_X86_32: + expected_typeid >>= 2 + self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + elif IS_X86_64: + self.mc.CMP32_mi((locs[0].value, 0), expected_typeid) def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs, ign_2): self.mc.ensure_bytes_available(256) Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Wed Sep 8 10:06:15 2010 @@ -26,6 +26,12 @@ no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + esi: 2, + edi: 3, + } + def call_result_location(self, v): return eax @@ -47,6 +53,13 @@ no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + r12: 2, + r13: 3, + r14: 4, + r15: 5, + } class FloatConstants(object): BASE_CONSTANT_SIZE = 1000 @@ -694,23 +707,18 @@ def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) gc_ll_descr = self.assembler.cpu.gc_ll_descr - tmp0 = TempBox() self.rm.force_allocate_reg(op.result, selected_reg=eax) - self.rm.force_allocate_reg(tmp0, selected_reg=edx) - # XXX about the next 10 lines: why not just say - # force_allocate_reg(tmp1, selected_reg=ecx)????? 
- for v, reg in self.rm.reg_bindings.items(): - if reg is ecx: - to_sync = v - break - else: - to_sync = None - if to_sync is not None: - self.rm._sync_var(to_sync) - del self.rm.reg_bindings[to_sync] - self.rm.free_regs.append(ecx) - # we need to do it here, so edx is not in reg_bindings - self.rm.possibly_free_var(tmp0) + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. + for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + self.assembler.malloc_cond_fixedsize( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), @@ -962,7 +970,7 @@ pass def get_mark_gc_roots(self, gcrootmap): - shape = gcrootmap.get_basic_shape() + shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) @@ -971,15 +979,8 @@ if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - if reg is ebx: - gcrootmap.add_ebx(shape) - elif reg is esi: - gcrootmap.add_esi(shape) - elif reg is edi: - gcrootmap.add_edi(shape) - else: - print "[get_mark_gc_roots] bogus register", reg - assert False + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape) def consider_force_token(self, op): Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regloc.py Wed Sep 8 10:06:15 2010 @@ -1,7 +1,7 @@ from pypy.jit.metainterp.history import AbstractValue, ConstInt from pypy.jit.backend.x86 import rx86 from pypy.rlib.unroll import unrolling_iterable -from pypy.jit.backend.x86.arch import WORD +from pypy.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import intmask Modified: pypy/trunk/pypy/jit/backend/x86/rx86.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/rx86.py (original) +++ pypy/trunk/pypy/jit/backend/x86/rx86.py Wed Sep 8 10:06:15 2010 @@ -462,6 +462,8 @@ CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) CMP_rj = insn(rex_w, '\x3B', register(1, 8), '\x05', immediate(2)) + CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) + AND8_rr = insn(rex_w, '\x20', byte_register(1), byte_register(2,8), '\xC0') OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0') Modified: pypy/trunk/pypy/jit/backend/x86/test/test_gc_integration.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_gc_integration.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_gc_integration.py Wed Sep 8 10:06:15 2010 @@ -26,16 +26,13 @@ CPU = getcpuclass() class MockGcRootMap(object): - def get_basic_shape(self): + def get_basic_shape(self, is_64_bit): return ['shape'] def 
add_ebp_offset(self, shape, offset): shape.append(offset) - def add_ebx(self, shape): - shape.append('ebx') - def add_esi(self, shape): - shape.append('esi') - def add_edi(self, shape): - shape.append('edi') + def add_callee_save_reg(self, shape, reg_index): + index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } + shape.append(index_to_name[reg_index]) def compress_callshape(self, shape): assert shape[0] == 'shape' return ['compressed'] + shape[1:] Modified: pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py Wed Sep 8 10:06:15 2010 @@ -128,10 +128,6 @@ class TestCompileHybrid(object): def setup_class(cls): - if IS_X86_64: - # No hybrid GC on 64-bit for the time being - py.test.skip() - funcs = [] name_to_func = {} for fullname in dir(cls): Modified: pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py Wed Sep 8 10:06:15 2010 @@ -125,10 +125,6 @@ return t def test_external_exception_handling_translates(self): - # FIXME - if IS_X86_64: - import py.test; py.test.skip() - jitdriver = JitDriver(greens = [], reds = ['n', 'total']) class ImDone(Exception): Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Wed Sep 8 10:06:15 2010 @@ -189,7 +189,7 @@ return r ''', 28, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_factorialrec(self): self.run_source(''' @@ -200,7 +200,7 @@ return 1 ''', 0, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' Modified: pypy/trunk/pypy/rpython/memory/gctransform/asmgcroot.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/asmgcroot.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/asmgcroot.py Wed Sep 8 10:06:15 2010 @@ -18,6 +18,7 @@ # The .s file produced by GCC is then parsed by trackgcroot.py. 
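The tests above switch to the new add_callee_save_reg(shape, reg_index) call, and the asmgcroot changes that follow size their frame tables from the same register numbering. A sketch of the two mappings, assuming the REGLOC_TO_GCROOTMAP_REG_INDEX tables from the regalloc.py hunk earlier; index 0 is not usable because an all-zero location already means LOC_NOWHERE:

    # callee-saved registers as the GC root map numbers them, per word size;
    # %ebp / %rbp is also callee-saved but does not appear here, since the
    # JIT keeps it as its frame pointer
    CALLEE_SAVE_INDEX_32 = {'%ebx': 1, '%esi': 2, '%edi': 3}
    CALLEE_SAVE_INDEX_64 = {'%rbx': 1, '%r12': 2, '%r13': 3, '%r14': 4, '%r15': 5}

    def encode_callee_save_reg(reg_index, LOC_REG=0):
        # same encoding as GcRootMap_asmgcc.add_callee_save_reg() above:
        # the register index is shifted past the two location-kind bits
        assert reg_index > 0
        return LOC_REG | (reg_index << 2)

    assert encode_callee_save_reg(CALLEE_SAVE_INDEX_32['%ebx']) == 4
    assert encode_callee_save_reg(CALLEE_SAVE_INDEX_64['%r15']) == 20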
# +IS_64_BITS = sys.maxint > 2147483647 class AsmGcRootFrameworkGCTransformer(FrameworkGCTransformer): _asmgcc_save_restore_arguments = None @@ -326,7 +327,7 @@ ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") return callee.regs_stored_at[reg] elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + 4 + esp_in_caller = callee.frame_address + sizeofaddr return esp_in_caller + offset elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] @@ -415,11 +416,12 @@ key1 = addr1.address[0] key2 = addr2.address[0] if key1 < key2: - return -1 + result = -1 elif key1 == key2: - return 0 + result = 0 else: - return 1 + result = 1 + return rffi.cast(rffi.INT, result) # ____________________________________________________________ @@ -464,9 +466,15 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # -CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers -INDEX_OF_EBP = 3 -FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array + +if IS_64_BITS: + CALLEE_SAVED_REGS = 6 + INDEX_OF_EBP = 5 + FRAME_PTR = CALLEE_SAVED_REGS +else: + CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers + INDEX_OF_EBP = 3 + FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) Modified: pypy/trunk/pypy/translator/c/gcc/instruction.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/instruction.py (original) +++ pypy/trunk/pypy/translator/c/gcc/instruction.py Wed Sep 8 10:06:15 2010 @@ -5,6 +5,14 @@ LOC_MASK = 0x03 LOC_NOWHERE = LOC_REG | 0 +# x86-32 registers sometimes used to pass arguments when gcc optimizes +# a function's calling convention +ARGUMENT_REGISTERS_32 = ('%eax', '%edx', '%ecx') + +# x86-64 registers used to pass arguments +ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') + + def frameloc_esp(offset): assert offset >= 0 assert offset % 4 == 0 @@ -19,7 +27,8 @@ class SomeNewValue(object): - pass + def __repr__(self): + return 'somenewvalue' somenewvalue = SomeNewValue() class LocalVar(object): @@ -42,7 +51,7 @@ else: return 1 - def getlocation(self, framesize, uses_frame_pointer): + def getlocation(self, framesize, uses_frame_pointer, wordsize): if (self.hint == 'esp' or not uses_frame_pointer or self.ofs_from_frame_end % 2 != 0): # try to use esp-relative addressing @@ -52,7 +61,7 @@ # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer - ofs_from_ebp = self.ofs_from_frame_end + 4 + ofs_from_ebp = self.ofs_from_frame_end + wordsize return frameloc_ebp(ofs_from_ebp) @@ -81,22 +90,28 @@ self.previous_insns = [] # all insns that jump (or fallthrough) here class InsnFunctionStart(Insn): + _args_ = ['arguments'] framesize = 0 previous_insns = () - def __init__(self, registers): + def __init__(self, registers, wordsize): self.arguments = {} for reg in registers: self.arguments[reg] = somenewvalue + self.wordsize = wordsize def source_of(self, localvar, tag): if localvar not in self.arguments: - if localvar in ('%eax', '%edx', '%ecx'): + if self.wordsize == 4 and localvar in ARGUMENT_REGISTERS_32: # xxx this might show a bug in trackgcroot.py failing to # figure out which instruction stored a value in these # registers. 
However, this case also occurs when the # the function's calling convention was optimized by gcc: # the 3 registers above are then used to pass arguments pass + elif self.wordsize == 8 and localvar in ARGUMENT_REGISTERS_64: + # this is normal: these registers are always used to + # pass arguments + pass else: assert (isinstance(localvar, LocalVar) and localvar.ofs_from_frame_end > 0), ( @@ -218,15 +233,16 @@ return {self.loc: None} class InsnPrologue(Insn): + def __init__(self, wordsize): + self.wordsize = wordsize def __setattr__(self, attr, value): if attr == 'framesize': - assert value == 4, ("unrecognized function prologue - " - "only supports push %ebp; movl %esp, %ebp") + assert value == self.wordsize, ( + "unrecognized function prologue - " + "only supports push %ebp; movl %esp, %ebp") Insn.__setattr__(self, attr, value) class InsnEpilogue(Insn): def __init__(self, framesize=None): if framesize is not None: self.framesize = framesize - - Modified: pypy/trunk/pypy/translator/c/gcc/test/conftest.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/test/conftest.py (original) +++ pypy/trunk/pypy/translator/c/gcc/test/conftest.py Wed Sep 8 10:06:15 2010 @@ -1,8 +1,6 @@ import py from pypy.jit.backend import detect_cpu - cpu = detect_cpu.autodetect() def pytest_runtest_setup(item): - if cpu != 'x86': + if cpu not in ('x86', 'x86_64'): py.test.skip("x86 directory skipped: cpu is %r" % (cpu,)) - Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_32bit_reg_zeroextend.s ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_32bit_reg_zeroextend.s Wed Sep 8 10:06:15 2010 @@ -0,0 +1,15 @@ + .type foobar, @function +foobar: + pushq %rbp + movq %rsp, %rbp + call some_function + ;; expected {8(%rbp) | %rbx, %r12, %r13, %r14, %r15, (%rbp) | } + movl $const1, %edx + movl $const2, %r10d + xorl %r10d, %r11d + /* GCROOT %rdx */ + /* GCROOT %r10 */ + /* GCROOT %r11 */ + leave + ret + .size foobar, .-foobar Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_basic_argument_registers.s ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_basic_argument_registers.s Wed Sep 8 10:06:15 2010 @@ -0,0 +1,31 @@ + .type foobar, @function +foobar: +.LFB0: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + movq %rsp, %rbp + .cfi_offset 6, -16 + .cfi_def_cfa_register 6 + subq $48, %rsp + movq %rdi, -8(%rbp) + movq %rsi, -16(%rbp) + movq %rdx, -24(%rbp) + movq %rcx, -32(%rbp) + movq %r8, -40(%rbp) + movq %r9, -48(%rbp) + movl $0, %eax + call some_function + ;; expected {8(%rbp) | %rbx, %r12, %r13, %r14, %r15, (%rbp) | -8(%rbp), -16(%rbp), -24(%rbp), -32(%rbp), -40(%rbp), -48(%rbp)} + /* GCROOT -8(%rbp) */ + /* GCROOT -16(%rbp) */ + /* GCROOT -24(%rbp) */ + /* GCROOT -32(%rbp) */ + /* GCROOT -40(%rbp) */ + /* GCROOT -48(%rbp) */ + movq -24(%rbp), %rax + leave + ret + .cfi_endproc +.LFE0: + .size foobar, .-foobar Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.c ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.c Wed Sep 8 10:06:15 2010 @@ -0,0 +1,18 @@ +#include + +int foobar(int n) { + switch(n) { + case 0: + return 1; + case 1: + return 12; + case 2: + return 123; + case 3: + return 1234; + case 4: + return 12345; + 
default: + return 42; + } +} Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.s ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_jumptable.s Wed Sep 8 10:06:15 2010 @@ -0,0 +1,48 @@ + .type foobar, @function +foobar: +.LFB0: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + movq %rsp, %rbp + .cfi_offset 6, -16 + .cfi_def_cfa_register 6 + movl %edi, -4(%rbp) + cmpl $4, -4(%rbp) + ja .L2 + mov -4(%rbp), %eax + movq .L8(,%rax,8), %rax + jmp *%rax + .section .rodata + .align 8 + .align 4 +.L8: + .quad .L3 + .quad .L4 + .quad .L5 + .quad .L6 + .quad .L7 + .text +.L3: + movl $1, %eax + jmp .L9 +.L4: + movl $12, %eax + jmp .L9 +.L5: + movl $123, %eax + jmp .L9 +.L6: + movl $1234, %eax + jmp .L9 +.L7: + movl $12345, %eax + jmp .L9 +.L2: + movl $42, %eax +.L9: + leave + ret + .cfi_endproc +.LFE0: + .size foobar, .-foobar Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_negative_rsp_offset.s ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_negative_rsp_offset.s Wed Sep 8 10:06:15 2010 @@ -0,0 +1,17 @@ + .type some_function, @function +some_function: + ;; Test using a negative offset from %rsp (gcc sometimes does this) + movq %rbx, -8(%rsp) + subq $8, %rsp + + movq %rdi, %rbx + + call some_other_function + ;; expected {8(%rsp) | (%rsp), %r12, %r13, %r14, %r15, %rbp | %rbx} + /* GCROOT %rbx */ + + movq %rbx, %rax + ;; Same as where %rbx was saved above + movq (%rsp), %rbx + ret + .size some_function, .-some_function Added: pypy/trunk/pypy/translator/c/gcc/test/elf64/track_varargs_function.s ============================================================================== --- (empty file) +++ pypy/trunk/pypy/translator/c/gcc/test/elf64/track_varargs_function.s Wed Sep 8 10:06:15 2010 @@ -0,0 +1,58 @@ + .type PyErr_Format, @function +PyErr_Format: +.LFB67: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + movzbl %al, %eax + pushq %rbx + .cfi_def_cfa_offset 24 + movq %rdi, %rbx + .cfi_offset 3, -24 + .cfi_offset 6, -16 + movq %rsi, %rdi + subq $216, %rsp + .cfi_def_cfa_offset 240 + movq %rdx, 48(%rsp) + leaq 0(,%rax,4), %rdx + movl $.L21, %eax + movq %rcx, 56(%rsp) + movq %r8, 64(%rsp) + movq %rsp, %rsi + subq %rdx, %rax + leaq 207(%rsp), %rdx + movq %r9, 72(%rsp) + jmp *%rax + movaps %xmm7, -15(%rdx) + movaps %xmm6, -31(%rdx) + movaps %xmm5, -47(%rdx) + movaps %xmm4, -63(%rdx) + movaps %xmm3, -79(%rdx) + movaps %xmm2, -95(%rdx) + movaps %xmm1, -111(%rdx) + movaps %xmm0, -127(%rdx) +.L21: + leaq 240(%rsp), %rax + movl $16, (%rsp) + movl $48, 4(%rsp) + movq %rax, 8(%rsp) + leaq 32(%rsp), %rax + movq %rax, 16(%rsp) + call PyString_FromFormatV + ;; expected {232(%rsp) | 216(%rsp), %r12, %r13, %r14, %r15, 224(%rsp) | } + movq %rbx, %rdi + movq %rax, %rbp + movq %rax, %rsi + call PyErr_SetObject + ;; expected {232(%rsp) | 216(%rsp), %r12, %r13, %r14, %r15, 224(%rsp) | } + movq %rbp, %rdi + call Py_DecRef + ;; expected {232(%rsp) | 216(%rsp), %r12, %r13, %r14, %r15, 224(%rsp) | } + addq $216, %rsp + xorl %eax, %eax + popq %rbx + popq %rbp + ret + .cfi_endproc +.LFE67: + .size PyErr_Format, .-PyErr_Format Modified: pypy/trunk/pypy/translator/c/gcc/test/test_trackgcroot.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/test/test_trackgcroot.py (original) +++ 
pypy/trunk/pypy/translator/c/gcc/test/test_trackgcroot.py Wed Sep 8 10:06:15 2010 @@ -1,51 +1,52 @@ import py import sys, re -from pypy.translator.c.gcc.trackgcroot import format_location -from pypy.translator.c.gcc.trackgcroot import format_callshape from pypy.translator.c.gcc.trackgcroot import LOC_NOWHERE, LOC_REG from pypy.translator.c.gcc.trackgcroot import LOC_EBP_PLUS, LOC_EBP_MINUS from pypy.translator.c.gcc.trackgcroot import LOC_ESP_PLUS from pypy.translator.c.gcc.trackgcroot import ElfAssemblerParser from pypy.translator.c.gcc.trackgcroot import DarwinAssemblerParser -from pypy.translator.c.gcc.trackgcroot import compress_callshape -from pypy.translator.c.gcc.trackgcroot import decompress_callshape from pypy.translator.c.gcc.trackgcroot import PARSERS +from pypy.translator.c.gcc.trackgcroot import ElfFunctionGcRootTracker32 from StringIO import StringIO +import py.test this_dir = py.path.local(__file__).dirpath() def test_format_location(): - assert format_location(LOC_NOWHERE) == '?' - assert format_location(LOC_REG | (1<<2)) == '%ebx' - assert format_location(LOC_REG | (2<<2)) == '%esi' - assert format_location(LOC_REG | (3<<2)) == '%edi' - assert format_location(LOC_REG | (4<<2)) == '%ebp' - assert format_location(LOC_EBP_PLUS + 0) == '(%ebp)' - assert format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' - assert format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' - assert format_location(LOC_ESP_PLUS + 0) == '(%esp)' - assert format_location(LOC_ESP_PLUS + 4) == '4(%esp)' + cls = ElfFunctionGcRootTracker32 + assert cls.format_location(LOC_NOWHERE) == '?' + assert cls.format_location(LOC_REG | (1<<2)) == '%ebx' + assert cls.format_location(LOC_REG | (2<<2)) == '%esi' + assert cls.format_location(LOC_REG | (3<<2)) == '%edi' + assert cls.format_location(LOC_REG | (4<<2)) == '%ebp' + assert cls.format_location(LOC_EBP_PLUS + 0) == '(%ebp)' + assert cls.format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' + assert cls.format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' + assert cls.format_location(LOC_ESP_PLUS + 0) == '(%esp)' + assert cls.format_location(LOC_ESP_PLUS + 4) == '4(%esp)' def test_format_callshape(): + cls = ElfFunctionGcRootTracker32 expected = ('{4(%ebp) ' # position of the return address '| 8(%ebp), 12(%ebp), 16(%ebp), 20(%ebp) ' # 4 saved regs '| 24(%ebp), 28(%ebp)}') # GC roots - assert format_callshape((LOC_EBP_PLUS+4, - LOC_EBP_PLUS+8, - LOC_EBP_PLUS+12, - LOC_EBP_PLUS+16, - LOC_EBP_PLUS+20, - LOC_EBP_PLUS+24, - LOC_EBP_PLUS+28)) == expected + assert cls.format_callshape((LOC_EBP_PLUS+4, + LOC_EBP_PLUS+8, + LOC_EBP_PLUS+12, + LOC_EBP_PLUS+16, + LOC_EBP_PLUS+20, + LOC_EBP_PLUS+24, + LOC_EBP_PLUS+28)) == expected def test_compress_callshape(): + cls = ElfFunctionGcRootTracker32 shape = (1, 127, 0x1234, 0x5678, 0x234567, 0x765432, 0x61626364, 0x41424344) - bytes = list(compress_callshape(shape)) + bytes = list(cls.compress_callshape(shape)) print bytes assert len(bytes) == 1+1+2+3+4+4+5+5+1 - assert decompress_callshape(bytes) == list(shape) + assert cls.decompress_callshape(bytes) == list(shape) def test_find_functions_elf(): source = """\ @@ -108,7 +109,7 @@ def test_computegcmaptable(): tests = [] - for format in ('elf', 'darwin', 'msvc'): + for format in ('elf', 'darwin', 'msvc', 'elf64'): for path in this_dir.join(format).listdir("track*.s"): n = path.purebasename[5:] try: @@ -138,7 +139,7 @@ tabledict = {} seen = {} for entry in table: - print '%s: %s' % (entry[0], format_callshape(entry[1])) + print '%s: %s' % (entry[0], tracker.format_callshape(entry[1])) 
tabledict[entry[0]] = entry[1] # find the ";; expected" lines prevline = "" @@ -151,7 +152,7 @@ label = prevmatch.group(1) assert label in tabledict got = tabledict[label] - assert format_callshape(got) == expected + assert tracker.format_callshape(got) == expected seen[label] = True if format == 'msvc': expectedlines.insert(i-2, 'PUBLIC\t%s\n' % (label,)) Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Wed Sep 8 10:06:15 2010 @@ -72,7 +72,7 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(4) + retaddr = frameloc_ebp(self.WORD) else: retaddr = frameloc_esp(insn.framesize) shape = [retaddr] @@ -84,7 +84,8 @@ for localvar, tag in insn.gcroots.items(): if isinstance(localvar, LocalVar): loc = localvar.getlocation(insn.framesize, - self.uses_frame_pointer) + self.uses_frame_pointer, + self.WORD) elif localvar in self.REG2LOC: loc = self.REG2LOC[localvar] else: @@ -148,7 +149,7 @@ lst.append(previnsn) def parse_instructions(self): - self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS)] + self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS, self.WORD)] ignore_insns = False for lineno, line in enumerate(self.lines): if lineno < self.skip: @@ -263,7 +264,7 @@ ofs_from_ebp = int(match.group(1) or '0') if self.format == 'msvc': ofs_from_ebp += int(match.group(2) or '0') - localvar = ofs_from_ebp - 4 + localvar = ofs_from_ebp - self.WORD assert localvar != 0 # that's the return address return LocalVar(localvar, hint='ebp') return localvar @@ -357,6 +358,56 @@ self.lines.insert(call.lineno+1, '\t.globl\t%s\n' % (label,)) call.global_label = label + @classmethod + def compress_callshape(cls, shape): + # For a single shape, this turns the list of integers into a list of + # bytes and reverses the order of the entries. The length is + # encoded by inserting a 0 marker after the gc roots coming from + # shape[N:] and before the N values coming from shape[N-1] to + # shape[0] (for N == 5 on 32-bit or 7 on 64-bit platforms). + # In practice it seems that shapes contain many integers + # whose value is up to a few thousands, which the algorithm below + # compresses down to 2 bytes. Very small values compress down to a + # single byte. + + # Callee-save regs plus ret addr + min_size = len(cls.CALLEE_SAVE_REGISTERS) + 1 + + assert len(shape) >= min_size + shape = list(shape) + assert 0 not in shape[min_size:] + shape.insert(min_size, 0) + result = [] + for loc in shape: + assert loc >= 0 + flag = 0 + while loc >= 0x80: + result.append(int(loc & 0x7F) | flag) + flag = 0x80 + loc >>= 7 + result.append(int(loc) | flag) + result.reverse() + return result + + @classmethod + def decompress_callshape(cls, bytes): + # For tests. This logic is copied in asmgcroot.py. 
+ result = [] + n = 0 + while n < len(bytes): + value = 0 + while True: + b = bytes[n] + n += 1 + value += b + if b < 0x80: + break + value = (value - 0x80) << 7 + result.append(value) + result.reverse() + assert result[5] == 0 + del result[5] + return result # ____________________________________________________________ CANNOT_COLLECT = { # some of the most used functions that cannot collect @@ -385,10 +436,9 @@ 'inc', 'dec', 'not', 'neg', 'or', 'and', 'sbb', 'adc', 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', + 'punpck', 'pshufd', # zero-extending moves should not produce GC pointers 'movz', - # quadword operations - 'movq', ]) visit_movb = visit_nop @@ -400,7 +450,7 @@ visit_xorb = visit_nop visit_xorw = visit_nop - def visit_addl(self, line, sign=+1): + def _visit_add(self, line, sign=+1): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -415,8 +465,8 @@ else: return [] - def visit_subl(self, line): - return self.visit_addl(line, sign=-1) + def _visit_sub(self, line): + return self._visit_add(line, sign=-1) def unary_insn(self, line): match = self.r_unaryinsn.match(line) @@ -439,8 +489,6 @@ else: return [] - visit_xorl = binary_insn # used in "xor reg, reg" to create a NULL GC ptr - visit_orl = binary_insn # The various cmov* operations for name in ''' e ne g ge l le a ae b be p np s ns o no @@ -448,7 +496,7 @@ locals()['visit_cmov' + name] = binary_insn locals()['visit_cmov' + name + 'l'] = binary_insn - def visit_andl(self, line): + def _visit_and(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -460,9 +508,7 @@ else: return self.binary_insn(line) - visit_and = visit_andl - - def visit_leal(self, line): + def _visit_lea(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -474,7 +520,7 @@ raise UnrecognizedOperation('epilogue without prologue') ofs_from_ebp = int(match.group(1) or '0') assert ofs_from_ebp <= 0 - framesize = 4 - ofs_from_ebp + framesize = self.WORD - ofs_from_ebp else: match = self.r_localvar_esp.match(source) # leal 12(%esp), %esp @@ -489,17 +535,23 @@ def insns_for_copy(self, source, target): source = self.replace_symbols(source) target = self.replace_symbols(target) - if source == self.ESP or target == self.ESP: + if target == self.ESP: raise UnrecognizedOperation('%s -> %s' % (source, target)) elif self.r_localvar.match(target): if self.r_localvar.match(source): + # eg, movl %eax, %ecx: possibly copies a GC root return [InsnCopyLocal(source, target)] else: + # eg, movl (%eax), %edi or mov %esp, %edi: load a register + # from "outside". If it contains a pointer to a GC root, + # it will be announced later with the GCROOT macro. return [InsnSetLocal(target, [source])] else: + # eg, movl %ebx, (%edx) or mov %ebp, %esp: does not write into + # a general register return [] - def visit_movl(self, line): + def _visit_mov(self, line): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -513,34 +565,24 @@ # gcc -fno-unit-at-a-time. 
return self.insns_for_copy(source, target) - visit_mov = visit_movl - - def visit_pushl(self, line): + def _visit_push(self, line): match = self.r_unaryinsn.match(line) source = match.group(1) - return [InsnStackAdjust(-4)] + self.insns_for_copy(source, self.TOP_OF_STACK) - - def visit_pushw(self, line): - return [InsnStackAdjust(-2)] # rare but not impossible + return [InsnStackAdjust(-self.WORD)] + self.insns_for_copy(source, self.TOP_OF_STACK) def _visit_pop(self, target): - return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+4)] - - def visit_popl(self, line): - match = self.r_unaryinsn.match(line) - target = match.group(1) - return self._visit_pop(target) + return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+self.WORD)] def _visit_prologue(self): # for the prologue of functions that use %ebp as frame pointer self.uses_frame_pointer = True self.r_localvar = self.r_localvarfp - return [InsnPrologue()] + return [InsnPrologue(self.WORD)] def _visit_epilogue(self): if not self.uses_frame_pointer: raise UnrecognizedOperation('epilogue without prologue') - return [InsnEpilogue(4)] + return [InsnEpilogue(self.WORD)] def visit_leave(self, line): return self._visit_epilogue() + self._visit_pop(self.EBP) @@ -662,7 +704,7 @@ visit_jc = conditional_jump visit_jnc = conditional_jump - def visit_xchgl(self, line): + def _visit_xchg(self, line): # only support the format used in VALGRIND_DISCARD_TRANSLATIONS # which is to use a marker no-op "xchgl %ebx, %ebx" match = self.r_binaryinsn.match(line) @@ -741,8 +783,172 @@ insns.append(InsnStackAdjust(16)) return insns + # __________ debugging output __________ + + @classmethod + def format_location(cls, loc): + # A 'location' is a single number describing where a value is stored + # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or + # in the stack frame at an address relative to either %esp or %ebp. + # The last two bits of the location number are used to tell the cases + # apart; see format_location(). + assert loc >= 0 + kind = loc & LOC_MASK + if kind == LOC_REG: + if loc == LOC_NOWHERE: + return '?' + reg = (loc >> 2) - 1 + return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") + else: + offset = loc & ~ LOC_MASK + if kind == LOC_EBP_PLUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + elif kind == LOC_EBP_MINUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + offset = -offset + elif kind == LOC_ESP_PLUS: + result = '(%' + cls.ESP.replace("%", "") + ')' + else: + assert 0, kind + if offset != 0: + result = str(offset) + result + return result + + @classmethod + def format_callshape(cls, shape): + # A 'call shape' is a tuple of locations in the sense of + # format_location(). They describe where in a function frame + # interesting values are stored, when this function executes a 'call' + # instruction. + # + # shape[0] is the location that stores the fn's own return + # address (not the return address for the currently + # executing 'call') + # + # shape[1..N] is where the fn saved its own caller's value of a + # certain callee save register. (where N is the number + # of callee save registers.) 
+ # + # shape[>N] are GC roots: where the fn has put its local GCPTR + # vars + # + num_callee_save_regs = len(cls.CALLEE_SAVE_REGISTERS) + assert isinstance(shape, tuple) + # + 1 for the return address + assert len(shape) >= (num_callee_save_regs + 1) + result = [cls.format_location(loc) for loc in shape] + return '{%s | %s | %s}' % (result[0], + ', '.join(result[1:(num_callee_save_regs+1)]), + ', '.join(result[(num_callee_save_regs+1):])) + + +class FunctionGcRootTracker32(FunctionGcRootTracker): + WORD = 4 + + visit_mov = FunctionGcRootTracker._visit_mov + visit_movl = FunctionGcRootTracker._visit_mov + visit_pushl = FunctionGcRootTracker._visit_push + visit_leal = FunctionGcRootTracker._visit_lea + + visit_addl = FunctionGcRootTracker._visit_add + visit_subl = FunctionGcRootTracker._visit_sub + visit_andl = FunctionGcRootTracker._visit_and + visit_and = FunctionGcRootTracker._visit_and + + visit_xchgl = FunctionGcRootTracker._visit_xchg + + # used in "xor reg, reg" to create a NULL GC ptr + visit_xorl = FunctionGcRootTracker.binary_insn + visit_orl = FunctionGcRootTracker.binary_insn # unsure about this one + + # occasionally used on 32-bits to move floats around + visit_movq = FunctionGcRootTracker.visit_nop + + def visit_pushw(self, line): + return [InsnStackAdjust(-2)] # rare but not impossible -class ElfFunctionGcRootTracker(FunctionGcRootTracker): + def visit_popl(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + +class FunctionGcRootTracker64(FunctionGcRootTracker): + WORD = 8 + + # Regex ignores destination + r_save_xmm_register = re.compile(r"\tmovaps\s+%xmm(\d+)") + + def _maybe_32bit_dest(func): + def wrapper(self, line): + # Using a 32-bit reg as a destination in 64-bit mode zero-extends + # to 64-bits, so sometimes gcc uses a 32-bit operation to copy a + # statically known pointer to a register + + # %eax -> %rax + new_line = re.sub(r"%e(ax|bx|cx|dx|di|si)$", r"%r\1", line) + # %r10d -> %r10 + new_line = re.sub(r"%r(\d+)d$", r"%r\1", new_line) + return func(self, new_line) + return wrapper + + visit_addl = FunctionGcRootTracker.visit_nop + visit_subl = FunctionGcRootTracker.visit_nop + visit_leal = FunctionGcRootTracker.visit_nop + + visit_cltq = FunctionGcRootTracker.visit_nop + + visit_movq = FunctionGcRootTracker._visit_mov + # just a special assembler mnemonic for mov + visit_movabsq = FunctionGcRootTracker._visit_mov + visit_mov = _maybe_32bit_dest(FunctionGcRootTracker._visit_mov) + visit_movl = visit_mov + + visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) + + visit_pushq = FunctionGcRootTracker._visit_push + + visit_addq = FunctionGcRootTracker._visit_add + visit_subq = FunctionGcRootTracker._visit_sub + + visit_leaq = FunctionGcRootTracker._visit_lea + + visit_xorq = FunctionGcRootTracker.binary_insn + + # FIXME: similar to visit_popl for 32-bit + def visit_popq(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + + def visit_jmp(self, line): + # On 64-bit, %al is used when calling varargs functions to specify an + # upper-bound on the number of xmm registers used in the call. 
gcc + # uses %al to compute an indirect jump that looks like: + # + # jmp *[some register] + # movaps %xmm7, [stack location] + # movaps %xmm6, [stack location] + # movaps %xmm5, [stack location] + # movaps %xmm4, [stack location] + # movaps %xmm3, [stack location] + # movaps %xmm2, [stack location] + # movaps %xmm1, [stack location] + # movaps %xmm0, [stack location] + # + # The jmp is always to somewhere in the block of "movaps" + # instructions, according to how many xmm registers need to be saved + # to the stack. The point of all this is that we can safely ignore + # jmp instructions of that form. + if (self.currentlineno + 8) < len(self.lines) and self.r_unaryinsn_star.match(line): + matches = [self.r_save_xmm_register.match(self.lines[self.currentlineno + 1 + i]) for i in range(8)] + if all(m and int(m.group(1)) == (7 - i) for i, m in enumerate(matches)): + return [] + + return FunctionGcRootTracker.visit_jmp(self, line) + + + +class ElfFunctionGcRootTracker32(FunctionGcRootTracker32): format = 'elf' ESP = '%esp' @@ -791,7 +997,65 @@ match = self.r_functionend.match(lines[-1]) assert funcname == match.group(1) assert funcname == match.group(2) - super(ElfFunctionGcRootTracker, self).__init__( + super(ElfFunctionGcRootTracker32, self).__init__( + funcname, lines, filetag) + + def extract_immediate(self, value): + if not value.startswith('$'): + return None + return int(value[1:]) + +ElfFunctionGcRootTracker32.init_regexp() + +class ElfFunctionGcRootTracker64(FunctionGcRootTracker64): + format = 'elf64' + ESP = '%rsp' + EBP = '%rbp' + EAX = '%rax' + CALLEE_SAVE_REGISTERS = ['%rbx', '%r12', '%r13', '%r14', '%r15', '%rbp'] + REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) + for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) + OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + OFFSET_LABELS = 2**30 + TOP_OF_STACK = '0(%rsp)' + + r_functionstart = re.compile(r"\t.type\s+"+LABEL+",\s*[@]function\s*$") + r_functionend = re.compile(r"\t.size\s+"+LABEL+",\s*[.]-"+LABEL+"\s*$") + LOCALVAR = r"%rax|%rbx|%rcx|%rdx|%rdi|%rsi|%rbp|%r8|%r9|%r10|%r11|%r12|%r13|%r14|%r15|-?\d*[(]%rsp[)]" + LOCALVARFP = LOCALVAR + r"|-?\d*[(]%rbp[)]" + r_localvarnofp = re.compile(LOCALVAR) + r_localvarfp = re.compile(LOCALVARFP) + r_localvar_esp = re.compile(r"(-?\d*)[(]%rsp[)]") + r_localvar_ebp = re.compile(r"(-?\d*)[(]%rbp[)]") + + r_rel_label = re.compile(r"(\d+):\s*$") + r_jump_rel_label = re.compile(r"\tj\w+\s+"+"(\d+)f"+"\s*$") + + r_unaryinsn_star= re.compile(r"\t[a-z]\w*\s+[*]("+OPERAND+")\s*$") + r_jmptable_item = re.compile(r"\t.quad\t"+LABEL+"(-\"[A-Za-z0-9$]+\")?\s*$") + r_jmptable_end = re.compile(r"\t.text|\t.section\s+.text|\t\.align|"+LABEL) + + r_gcroot_marker = re.compile(r"\t/[*] GCROOT ("+LOCALVARFP+") [*]/") + r_gcnocollect_marker = re.compile(r"\t/[*] GC_NOCOLLECT ("+OPERAND+") [*]/") + r_bottom_marker = re.compile(r"\t/[*] GC_STACK_BOTTOM [*]/") + + FUNCTIONS_NOT_RETURNING = { + 'abort': None, + '_exit': None, + '__assert_fail': None, + '___assert_rtn': None, + 'L___assert_rtn$stub': None, + 'L___eprintf$stub': None, + } + + def __init__(self, lines, filetag=0): + match = self.r_functionstart.match(lines[0]) + funcname = match.group(1) + match = self.r_functionend.match(lines[-1]) + assert funcname == match.group(1) + assert funcname == match.group(2) + super(ElfFunctionGcRootTracker64, self).__init__( funcname, lines, filetag) def extract_immediate(self, value): @@ -799,9 +1063,9 @@ return None return int(value[1:]) 
-ElfFunctionGcRootTracker.init_regexp() +ElfFunctionGcRootTracker64.init_regexp() -class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker): +class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker32): format = 'darwin' r_functionstart = re.compile(r"_(\w+):\s*$") @@ -810,7 +1074,7 @@ def __init__(self, lines, filetag=0): match = self.r_functionstart.match(lines[0]) funcname = '_' + match.group(1) - FunctionGcRootTracker.__init__(self, funcname, lines, filetag) + FunctionGcRootTracker32.__init__(self, funcname, lines, filetag) class Mingw32FunctionGcRootTracker(DarwinFunctionGcRootTracker): format = 'mingw32' @@ -821,7 +1085,7 @@ '__assert': None, } -class MsvcFunctionGcRootTracker(FunctionGcRootTracker): +class MsvcFunctionGcRootTracker(FunctionGcRootTracker32): format = 'msvc' ESP = 'esp' EBP = 'ebp' @@ -906,12 +1170,12 @@ push pop mov lea xor sub add '''.split(): - locals()['visit_' + name] = getattr(FunctionGcRootTracker, + locals()['visit_' + name] = getattr(FunctionGcRootTracker32, 'visit_' + name + 'l') - visit_int = FunctionGcRootTracker.visit_nop + visit_int = FunctionGcRootTracker32.visit_nop # probably not GC pointers - visit_cdq = FunctionGcRootTracker.visit_nop + visit_cdq = FunctionGcRootTracker32.visit_nop def visit_npad(self, line): # MASM has a nasty bug: it implements "npad 5" with "add eax, 0" @@ -1038,7 +1302,7 @@ table = tracker.computegcmaptable(self.verbose) if self.verbose > 1: for label, state in table: - print >> sys.stderr, label, '\t', format_callshape(state) + print >> sys.stderr, label, '\t', tracker.format_callshape(state) table = compress_gcmaptable(table) if self.shuffle and random.random() < 0.5: self.gcmaptable[:0] = table @@ -1049,7 +1313,7 @@ class ElfAssemblerParser(AssemblerParser): format = "elf" - FunctionGcRootTracker = ElfFunctionGcRootTracker + FunctionGcRootTracker = ElfFunctionGcRootTracker32 def find_functions(self, iterlines): functionlines = [] @@ -1072,6 +1336,10 @@ "missed the end of the previous function") yield False, functionlines +class ElfAssemblerParser64(ElfAssemblerParser): + format = "elf64" + FunctionGcRootTracker = ElfFunctionGcRootTracker64 + class DarwinAssemblerParser(AssemblerParser): format = "darwin" FunctionGcRootTracker = DarwinFunctionGcRootTracker @@ -1241,6 +1509,7 @@ PARSERS = { 'elf': ElfAssemblerParser, + 'elf64': ElfAssemblerParser64, 'darwin': DarwinAssemblerParser, 'mingw32': Mingw32AssemblerParser, 'msvc': MsvcAssemblerParser, @@ -1281,6 +1550,13 @@ txt = kwargs[self.format] print >> output, "\t%s" % txt + if self.format == 'elf64': + word_decl = '.quad' + else: + word_decl = '.long' + + tracker_cls = PARSERS[self.format].FunctionGcRootTracker + # The pypy_asm_stackwalk() function if self.format == 'msvc': @@ -1327,7 +1603,56 @@ } } """ + elif self.format == 'elf64': + print >> output, "\t.text" + print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') + print >> output, "\t.type pypy_asm_stackwalk, @function" + print >> output, "%s:" % _globalname('pypy_asm_stackwalk') + + print >> output, """\ + /* See description in asmgcroot.py */ + movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ + movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ + movq\t%rsp, %rax\t/* my frame top address */ + pushq\t%rax\t\t/* ASM_FRAMEDATA[8] */ + pushq\t%rbp\t\t/* ASM_FRAMEDATA[7] */ + pushq\t%r15\t\t/* ASM_FRAMEDATA[6] */ + pushq\t%r14\t\t/* ASM_FRAMEDATA[5] */ + pushq\t%r13\t\t/* ASM_FRAMEDATA[4] */ + pushq\t%r12\t\t/* ASM_FRAMEDATA[3] */ + pushq\t%rbx\t\t/* ASM_FRAMEDATA[2] */ + + /* Add this 
ASM_FRAMEDATA to the front of the circular linked */ + /* list. Let's call it 'self'. */ + movq\t8(%rcx), %rax\t/* next = gcrootanchor->next */ + pushq\t%rax\t\t\t\t/* self->next = next */ + pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ + movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ + movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ + + /* note: the Mac OS X 16 bytes aligment must be respected. */ + call\t*%rdx\t\t/* invoke the callback */ + + /* Detach this ASM_FRAMEDATA from the circular linked list */ + popq\t%rsi\t\t/* prev = self->prev */ + popq\t%rdi\t\t/* next = self->next */ + movq\t%rdi, 8(%rsi)\t/* prev->next = next */ + movq\t%rsi, 0(%rdi)\t/* next->prev = prev */ + + popq\t%rbx\t\t/* restore from ASM_FRAMEDATA[2] */ + popq\t%r12\t\t/* restore from ASM_FRAMEDATA[3] */ + popq\t%r13\t\t/* restore from ASM_FRAMEDATA[4] */ + popq\t%r14\t\t/* restore from ASM_FRAMEDATA[5] */ + popq\t%r15\t\t/* restore from ASM_FRAMEDATA[6] */ + popq\t%rbp\t\t/* restore from ASM_FRAMEDATA[7] */ + popq\t%rcx\t\t/* ignored ASM_FRAMEDATA[8] */ + + /* the return value is the one of the 'call' above, */ + /* because %rax (and possibly %rdx) are unmodified */ + ret + .size pypy_asm_stackwalk, .-pypy_asm_stackwalk + """ else: print >> output, "\t.text" print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') @@ -1401,7 +1726,7 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t%s,\t/* %s */\n' % ( ', '.join(bytes), shapeofs)) @@ -1433,17 +1758,18 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t/*%d*/\t.byte\t%s\n' % ( shapeofs, ', '.join(bytes))) shapeofs += len(bytes) if is_range: n = ~ n - print >> output, '\t.long\t%s-%d' % ( + print >> output, '\t%s\t%s-%d' % ( + word_decl, label, - PARSERS[self.format].FunctionGcRootTracker.OFFSET_LABELS) - print >> output, '\t.long\t%d' % (n,) + tracker_cls.OFFSET_LABELS) + print >> output, '\t%s\t%d' % (word_decl, n) print >> output, """\ .globl __gcmapend @@ -1451,6 +1777,7 @@ """.replace("__gcmapend", _globalname("__gcmapend")) _variant(elf='.section\t.rodata', + elf64='.section\t.rodata', darwin='.const', mingw32='') @@ -1483,56 +1810,6 @@ pass -# __________ debugging output __________ - -def format_location(loc): - # A 'location' is a single number describing where a value is stored - # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or - # in the stack frame at an address relative to either %esp or %ebp. - # The last two bits of the location number are used to tell the cases - # apart; see format_location(). - assert loc >= 0 - kind = loc & LOC_MASK - if kind == LOC_REG: - if loc == LOC_NOWHERE: - return '?' - reg = (loc >> 2) - 1 - return ElfFunctionGcRootTracker.CALLEE_SAVE_REGISTERS[reg] - else: - offset = loc & ~ LOC_MASK - if kind == LOC_EBP_PLUS: - result = '(%ebp)' - elif kind == LOC_EBP_MINUS: - result = '(%ebp)' - offset = -offset - elif kind == LOC_ESP_PLUS: - result = '(%esp)' - else: - assert 0, kind - if offset != 0: - result = str(offset) + result - return result - -def format_callshape(shape): - # A 'call shape' is a tuple of locations in the sense of format_location(). - # They describe where in a function frame interesting values are stored, - # when this function executes a 'call' instruction. 
- # - # shape[0] is the location that stores the fn's own return address - # (not the return address for the currently executing 'call') - # shape[1] is where the fn saved its own caller's %ebx value - # shape[2] is where the fn saved its own caller's %esi value - # shape[3] is where the fn saved its own caller's %edi value - # shape[4] is where the fn saved its own caller's %ebp value - # shape[>=5] are GC roots: where the fn has put its local GCPTR vars - # - assert isinstance(shape, tuple) - assert len(shape) >= 5 - result = [format_location(loc) for loc in shape] - return '{%s | %s | %s}' % (result[0], - ', '.join(result[1:5]), - ', '.join(result[5:])) - # __________ table compression __________ def compress_gcmaptable(table): @@ -1559,49 +1836,6 @@ yield (label1, state, is_range) i = j -def compress_callshape(shape): - # For a single shape, this turns the list of integers into a list of - # bytes and reverses the order of the entries. The length is - # encoded by inserting a 0 marker after the gc roots coming from - # shape[5:] and before the 5 values coming from shape[4] to - # shape[0]. In practice it seems that shapes contain many integers - # whose value is up to a few thousands, which the algorithm below - # compresses down to 2 bytes. Very small values compress down to a - # single byte. - assert len(shape) >= 5 - shape = list(shape) - assert 0 not in shape[5:] - shape.insert(5, 0) - result = [] - for loc in shape: - assert loc >= 0 - flag = 0 - while loc >= 0x80: - result.append(int(loc & 0x7F) | flag) - flag = 0x80 - loc >>= 7 - result.append(int(loc) | flag) - result.reverse() - return result - -def decompress_callshape(bytes): - # For tests. This logic is copied in asmgcroot.py. - result = [] - n = 0 - while n < len(bytes): - value = 0 - while True: - b = bytes[n] - n += 1 - value += b - if b < 0x80: - break - value = (value - 0x80) << 7 - result.append(value) - result.reverse() - assert result[5] == 0 - del result[5] - return result def getidentifier(s): def mapchar(c): @@ -1626,7 +1860,10 @@ elif sys.platform == 'win32': format = 'mingw32' else: - format = 'elf' + if sys.maxint > 2147483647: + format = 'elf64' + else: + format = 'elf' entrypoint = 'main' while len(sys.argv) > 1: if sys.argv[1] == '-v': Modified: pypy/trunk/pypy/translator/platform/linux.py ============================================================================== --- pypy/trunk/pypy/translator/platform/linux.py (original) +++ pypy/trunk/pypy/translator/platform/linux.py Wed Sep 8 10:06:15 2010 @@ -3,7 +3,7 @@ from pypy.translator.platform import _run_subprocess from pypy.translator.platform.posix import BasePosix -class Linux(BasePosix): +class BaseLinux(BasePosix): name = "linux" link_flags = ('-pthread', '-lrt') @@ -25,10 +25,12 @@ return self._pkg_config("libffi", "--libs-only-L", ['/usr/lib/libffi']) + +class Linux(BaseLinux): def library_dirs_for_libffi_a(self): # places where we need to look for libffi.a return self.library_dirs_for_libffi() + ['/usr/lib'] -class Linux64(Linux): - shared_only = ('-fPIC',) +class Linux64(BaseLinux): + pass Modified: pypy/trunk/pypy/translator/platform/posix.py ============================================================================== --- pypy/trunk/pypy/translator/platform/posix.py (original) +++ pypy/trunk/pypy/translator/platform/posix.py Wed Sep 8 10:06:15 2010 @@ -104,6 +104,11 @@ else: target_name = exe_name.basename + if shared: + cflags = self.cflags + self.shared_only + else: + cflags = self.cflags + self.standalone_only + m = GnuMakefile(path) 
m.exe_name = exe_name m.eci = eci @@ -132,7 +137,7 @@ ('LIBS', self._libs(eci.libraries)), ('LIBDIRS', self._libdirs(eci.library_dirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), - ('CFLAGS', self.cflags), + ('CFLAGS', cflags), ('CFLAGSEXTRA', list(eci.compile_extra)), ('LDFLAGS', linkflags), ('LDFLAGSEXTRA', list(eci.link_extra)), From arigo at codespeak.net Wed Sep 8 10:06:39 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 10:06:39 +0200 (CEST) Subject: [pypy-svn] r76932 - pypy/branch/asmgcc-64 Message-ID: <20100908080639.53680282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 10:06:37 2010 New Revision: 76932 Removed: pypy/branch/asmgcc-64/ Log: Remove merged branch. From arigo at codespeak.net Wed Sep 8 10:30:38 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 10:30:38 +0200 (CEST) Subject: [pypy-svn] r76933 - in pypy/branch/gc-module/pypy/module/gc: . test Message-ID: <20100908083038.17B28282BDC@codespeak.net> Author: arigo Date: Wed Sep 8 10:30:33 2010 New Revision: 76933 Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Log: gc.get_referrers(). Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Wed Sep 8 10:30:33 2010 @@ -24,6 +24,7 @@ 'get_rpy_type_index': 'referents.get_rpy_type_index', 'get_objects': 'referents.get_objects', 'get_referents': 'referents.get_referents', + 'get_referrers': 'referents.get_referrers', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Wed Sep 8 10:30:33 2010 @@ -79,19 +79,23 @@ else: pending.append(gcref) -def get_objects(space): - """Return a list of all app-level objects.""" - roots = rgc.get_rpy_roots() - # start from the roots, which is a list of gcrefs that may or may not - # be W_Roots - pending_w = [] # <- list of W_Roots - for gcref in roots: +def _get_objects_from_rpy(list_of_gcrefs): + # given a list of gcrefs that may or may not be W_Roots, build a list + # of W_Roots obtained by following references from there. + result_w = [] # <- list of W_Roots + for gcref in list_of_gcrefs: if gcref: w_obj = try_cast_gcref_to_w_root(gcref) if w_obj is not None: - pending_w.append(w_obj) + result_w.append(w_obj) else: - _list_w_obj_referents(gcref, pending_w) + _list_w_obj_referents(gcref, result_w) + return result_w + +def get_objects(space): + """Return a list of all app-level objects.""" + roots = rgc.get_rpy_roots() + pending_w = _get_objects_from_rpy(roots) # continue by following every W_Root. Note that this will force a hash # on every W_Root, which is kind of bad, but not on every RPython object, # which is really good. 
@@ -116,3 +120,30 @@ _list_w_obj_referents(gcref, result) return space.newlist(result) get_referents.unwrap_spec = [ObjSpace, 'args_w'] + +def get_referrers(space, args_w): + """Return the list of objects that directly refer to any of objs.""" + roots = rgc.get_rpy_roots() + pending_w = _get_objects_from_rpy(roots) + arguments_w = {} + for w_obj in args_w: + arguments_w[w_obj] = None + # continue by following every W_Root. Same remark about hashes as + # in get_objects(). + result_w = {} + seen_w = {} + while len(pending_w) > 0: + previous_w = pending_w + pending_w = [] + for w_obj in previous_w: + if w_obj not in seen_w: + seen_w[w_obj] = None + gcref = rgc.cast_instance_to_gcref(w_obj) + referents_w = [] + _list_w_obj_referents(gcref, referents_w) + for w_subobj in referents_w: + if w_subobj in arguments_w: + result_w[w_obj] = None + pending_w += referents_w + return space.newlist(result_w.keys()) +get_referrers.unwrap_spec = [ObjSpace, 'args_w'] Modified: pypy/branch/gc-module/pypy/module/gc/test/test_referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/test/test_referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/test/test_referents.py Wed Sep 8 10:30:33 2010 @@ -93,3 +93,14 @@ x = [y, z] lst = gc.get_referents(x) assert y in lst and z in lst + + def test_get_referrers(self): + import gc + l27 = self.ALL_ROOTS[1] + i2, i7 = l27 + lst = gc.get_referrers(i7) + for x in lst: + if x is l27: + break # found + else: + assert 0, "the list [2, 7] is not found as gc.get_referrers(7)" From arigo at codespeak.net Wed Sep 8 10:42:10 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 10:42:10 +0200 (CEST) Subject: [pypy-svn] r76934 - pypy/trunk/pypy/rlib Message-ID: <20100908084210.3A59D36C22F@codespeak.net> Author: arigo Date: Wed Sep 8 10:42:07 2010 New Revision: 76934 Modified: pypy/trunk/pypy/rlib/rzipfile.py Log: Move the constant 0xffffffff out of the RPython functions. Modified: pypy/trunk/pypy/rlib/rzipfile.py ============================================================================== --- pypy/trunk/pypy/rlib/rzipfile.py (original) +++ pypy/trunk/pypy/rlib/rzipfile.py Wed Sep 8 10:42:07 2010 @@ -16,15 +16,16 @@ crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] +mask32 = r_uint(0xffffffffL) def crc32(s, crc=0): result = 0 - crc = ~r_uint(crc) & 0xffffffffL + crc = ~r_uint(crc) & mask32 for c in s: - crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) + crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xff] ^ (crc >> 8) #/* Note: (crc >> 8) MUST zero fill on left - result = crc ^ 0xffffffffL + result = crc ^ mask32 return result @@ -194,7 +195,7 @@ (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, crc, x.compress_size, x.file_size) = centdir[1:12] - x.CRC = r_uint(crc) & 0xffffffff + x.CRC = r_uint(crc) & mask32 x.dostime = t x.dosdate = d x.volume, x.internal_attr, x.external_attr = centdir[15:18] From arigo at codespeak.net Wed Sep 8 10:47:43 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 10:47:43 +0200 (CEST) Subject: [pypy-svn] r76935 - pypy/trunk/pypy/rlib Message-ID: <20100908084743.40DF9282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 10:47:40 2010 New Revision: 76935 Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py Log: Revert r76928. It breaks tons of tests and there is no reason for this checkin alone. 
Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/trunk/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/trunk/pypy/rlib/_rsocket_rffi.py Wed Sep 8 10:47:40 2010 @@ -32,7 +32,6 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', - 'netpacket/packet.h' ) cond_includes = [('AF_NETLINK', 'linux/netlink.h')] libraries = () @@ -274,7 +273,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), @@ -310,15 +309,6 @@ [('fd', socketfd_type), ('events', rffi.SHORT), ('revents', rffi.SHORT)]) - - CConfig.sockaddr_ll = platform.Struct('struct sockaddr_ll', - [('sll_protocol', rffi.INT), - ('sll_pkttype', rffi.INT), - ('sll_hatype', rffi.INT), - ('sll_addr', CCHARP), - ('sll_halen', rffi.INT)], - ) - if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( From arigo at codespeak.net Wed Sep 8 11:34:37 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 11:34:37 +0200 (CEST) Subject: [pypy-svn] r76936 - in pypy/trunk/pypy: objspace/flow rlib Message-ID: <20100908093437.28465282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 11:34:35 2010 New Revision: 76936 Modified: pypy/trunk/pypy/objspace/flow/specialcase.py pypy/trunk/pypy/rlib/rzipfile.py Log: Allow constant-folding to occur early for r_uint(long-value). It must be done before annotation, because now annotation explodes when seeing the long value. Modified: pypy/trunk/pypy/objspace/flow/specialcase.py ============================================================================== --- pypy/trunk/pypy/objspace/flow/specialcase.py (original) +++ pypy/trunk/pypy/objspace/flow/specialcase.py Wed Sep 8 11:34:35 2010 @@ -3,6 +3,7 @@ from pypy.interpreter.gateway import ApplevelClass from pypy.interpreter.error import OperationError from pypy.tool.cache import Cache +from pypy.rlib.rarithmetic import r_uint import py def sc_import(space, fn, args): @@ -120,6 +121,14 @@ pass return space.do_operation('simple_call', Constant(func), *args_w) +def sc_r_uint(space, r_uint, args): + args_w, kwds_w = args.unpack() + assert not kwds_w + [w_value] = args_w + if isinstance(w_value, Constant): + return Constant(r_uint(w_value.value)) + return space.do_operation('simple_call', space.wrap(r_uint), w_value) + def setup(space): # fn = pyframe.normalize_exception.get_function(space) # this is now routed through the objspace, directly. 
@@ -131,3 +140,7 @@ # if possible for fn in OperationName: space.specialcases[fn] = sc_operator + # special case to constant-fold r_uint(32-bit-constant) + # (normally, the 32-bit constant is a long, and is not allowed to + # show up in the flow graphs at all) + space.specialcases[r_uint] = sc_r_uint Modified: pypy/trunk/pypy/rlib/rzipfile.py ============================================================================== --- pypy/trunk/pypy/rlib/rzipfile.py (original) +++ pypy/trunk/pypy/rlib/rzipfile.py Wed Sep 8 11:34:35 2010 @@ -16,16 +16,15 @@ crc_32_tab = import_from_lib_pypy('binascii').crc_32_tab rcrc_32_tab = [r_uint(i) for i in crc_32_tab] -mask32 = r_uint(0xffffffffL) def crc32(s, crc=0): result = 0 - crc = ~r_uint(crc) & mask32 + crc = ~r_uint(crc) & r_uint(0xffffffffL) for c in s: - crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xff] ^ (crc >> 8) + crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) #/* Note: (crc >> 8) MUST zero fill on left - result = crc ^ mask32 + result = crc ^ r_uint(0xffffffffL) return result @@ -195,7 +194,7 @@ (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, crc, x.compress_size, x.file_size) = centdir[1:12] - x.CRC = r_uint(crc) & mask32 + x.CRC = r_uint(crc) & r_uint(0xffffffff) x.dostime = t x.dosdate = d x.volume, x.internal_attr, x.external_attr = centdir[15:18] From arigo at codespeak.net Wed Sep 8 11:56:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 11:56:50 +0200 (CEST) Subject: [pypy-svn] r76937 - pypy/trunk/pypy/translator/platform Message-ID: <20100908095650.829FD282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 11:56:49 2010 New Revision: 76937 Modified: pypy/trunk/pypy/translator/platform/posix.py Log: Temporary fix. It seems that on 32-bit linux, compiling with -fPIC gives assembler that asmgcc is not happy about. Modified: pypy/trunk/pypy/translator/platform/posix.py ============================================================================== --- pypy/trunk/pypy/translator/platform/posix.py (original) +++ pypy/trunk/pypy/translator/platform/posix.py Wed Sep 8 11:56:49 2010 @@ -4,7 +4,7 @@ from pypy.translator.platform import Platform, log, _run_subprocess from pypy.tool import autopath -import py, os +import py, os, sys class BasePosix(Platform): exe_ext = '' @@ -104,10 +104,12 @@ else: target_name = exe_name.basename - if shared: - cflags = self.cflags + self.shared_only - else: - cflags = self.cflags + self.standalone_only + cflags = self.cflags + if sys.maxint > 2147483647: # XXX XXX XXX sort this out + if shared: + cflags = self.cflags + self.shared_only + else: + cflags = self.cflags + self.standalone_only m = GnuMakefile(path) m.exe_name = exe_name From arigo at codespeak.net Wed Sep 8 12:04:26 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 12:04:26 +0200 (CEST) Subject: [pypy-svn] r76938 - pypy/trunk/pypy/rlib Message-ID: <20100908100426.6861A282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 12:04:24 2010 New Revision: 76938 Modified: pypy/trunk/pypy/rlib/rsha.py Log: These constants also need a r_uint() now. 
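The wrapping matters because of r76936 above: a 32-bit constant written as a Python long (0xffffffffL, or the SHA-1 round constants here) is not allowed to show up in the flow graphs, so it has to be passed through r_uint(), and the flow object space now constant-folds that call when the argument is a constant. Below is a minimal sketch of the resulting pattern, purely for illustration and with made-up helper names (MASK32, masked); the actual change is in the diff that follows.

    from pypy.rlib.rarithmetic import r_uint

    # Wrap the long constants once, at module level, so annotation only
    # ever sees r_uint values.  These are the SHA-1 round constants that
    # rsha.py zips with the round functions.
    K = map(r_uint, [0x5A827999L, 0x6ED9EBA1L,
                     0x8F1BBCDCL, 0xCA62C1D6L])

    # MASK32 and masked() are illustrative names, not part of the checkin.
    MASK32 = r_uint(0xffffffffL)    # constant-folded by sc_r_uint (r76936)

    def masked(x):
        # r_uint arithmetic stays within the unsigned machine word, so this
        # behaves the same untranslated and after translation.
        return r_uint(x) & MASK32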
Modified: pypy/trunk/pypy/rlib/rsha.py ============================================================================== --- pypy/trunk/pypy/rlib/rsha.py (original) +++ pypy/trunk/pypy/rlib/rsha.py Wed Sep 8 12:04:24 2010 @@ -88,7 +88,7 @@ 0xCA62C1D6L # (60 <= t <= 79) ] -unroll_f_K = unrolling_iterable(zip(f, K)) +unroll_f_K = unrolling_iterable(zip(f, map(r_uint, K))) if UNROLL_ALL: unroll_range_20 = unrolling_iterable(range(20)) From fijal at codespeak.net Wed Sep 8 13:40:51 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 13:40:51 +0200 (CEST) Subject: [pypy-svn] r76940 - pypy/branch/rsocket-improvements Message-ID: <20100908114051.6FD6E282BD6@codespeak.net> Author: fijal Date: Wed Sep 8 13:40:44 2010 New Revision: 76940 Added: pypy/branch/rsocket-improvements/ (props changed) - copied from r76939, pypy/trunk/ Log: Create a branch to implement missing features for rsocket (and _socket) From arigo at codespeak.net Wed Sep 8 13:41:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 13:41:50 +0200 (CEST) Subject: [pypy-svn] r76941 - pypy/branch/jit-generator Message-ID: <20100908114150.03A63282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 13:41:49 2010 New Revision: 76941 Added: pypy/branch/jit-generator/ - copied from r76940, pypy/trunk/ Log: A branch to try to support generators nicely with the JIT. From arigo at codespeak.net Wed Sep 8 14:00:39 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 14:00:39 +0200 (CEST) Subject: [pypy-svn] r76942 - pypy/branch/jit-generator/pypy/module/pypyjit Message-ID: <20100908120039.415D3282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 14:00:37 2010 New Revision: 76942 Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py pypy/branch/jit-generator/pypy/module/pypyjit/policy.py Log: First attempt. Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py ============================================================================== --- pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py Wed Sep 8 14:00:37 2010 @@ -60,7 +60,10 @@ set_jitcell_at = set_jitcell_at, confirm_enter_jit = confirm_enter_jit) +PyFrame__execute_generator_frame = PyFrame.execute_generator_frame + class __extend__(PyFrame): + last_yield = -1 def dispatch(self, pycode, next_instr, ec): self = hint(self, access_directly=True) @@ -75,7 +78,16 @@ except ExitFrame: return self.popvalue() + def execute_generator_frame(self, w_inputvalue, operr=None): + self.last_yield = self.last_instr + return PyFrame__execute_generator_frame(self, w_inputvalue, operr) + def jump_absolute(self, jumpto, _, ec=None): + if jumpto <= self.last_yield: + # Here we are in a generator, closing the loop that did a YIELD. + # In that case, we should not consider this a loop at all. 
+ self.last_yield = -1 + return jumpto if we_are_jitted(): self.last_instr = intmask(jumpto) ec.bytecode_trace(self) Modified: pypy/branch/jit-generator/pypy/module/pypyjit/policy.py ============================================================================== --- pypy/branch/jit-generator/pypy/module/pypyjit/policy.py (original) +++ pypy/branch/jit-generator/pypy/module/pypyjit/policy.py Wed Sep 8 14:00:37 2010 @@ -32,8 +32,6 @@ return False if mod.startswith('pypy.interpreter.pyparser.'): return False - if mod == 'pypy.interpreter.generator': - return False if mod.startswith('pypy.module.'): modname = mod[len('pypy.module.'):] if not self.look_inside_pypy_module(modname): From arigo at codespeak.net Wed Sep 8 14:02:17 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 14:02:17 +0200 (CEST) Subject: [pypy-svn] r76943 - pypy/branch/jit-generator/pypy/module/pypyjit Message-ID: <20100908120217.B0899282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 14:02:16 2010 New Revision: 76943 Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py Log: Re-enable generators. Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py ============================================================================== --- pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py Wed Sep 8 14:02:16 2010 @@ -9,7 +9,7 @@ import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, Arguments -from pypy.interpreter.pycode import PyCode, CO_GENERATOR +from pypy.interpreter.pycode import PyCode from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame from opcode import opmap @@ -36,8 +36,7 @@ bytecode.jit_cells[next_instr] = newcell def confirm_enter_jit(next_instr, bytecode, frame, ec): - return (not (bytecode.co_flags & CO_GENERATOR) and - frame.w_f_trace is None and + return (frame.w_f_trace is None and ec.profilefunc is None and ec.w_tracefunc is None) From afa at codespeak.net Wed Sep 8 15:37:58 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 8 Sep 2010 15:37:58 +0200 (CEST) Subject: [pypy-svn] r76945 - pypy/trunk/pypy/rpython/lltypesystem/test Message-ID: <20100908133758.D969E282BD6@codespeak.net> Author: afa Date: Wed Sep 8 15:37:56 2010 New Revision: 76945 Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_rffi.py Log: Fix test_rffi on Windows, which aborts() in os.write when an invalid fd is used. Use a non-writable file instead. Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_rffi.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_rffi.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_rffi.py Wed Sep 8 15:37:56 2010 @@ -186,6 +186,11 @@ def test_externvar(self): import os + if os.name == 'nt': + # Windows CRT badly aborts when an invalid fd is used. 
+ bad_fd = 0 + else: + bad_fd = 12312312 def f(): set_errno(12) @@ -193,7 +198,7 @@ def g(): try: - os.write(12312312, "xxx") + os.write(bad_fd, "xxx") except OSError: pass return get_errno() From arigo at codespeak.net Wed Sep 8 16:11:31 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 16:11:31 +0200 (CEST) Subject: [pypy-svn] r76946 - in pypy/branch/jit-generator/pypy: jit/metainterp jit/metainterp/test rlib Message-ID: <20100908141131.E721C282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 16:11:29 2010 New Revision: 76946 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_basic.py pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py pypy/branch/jit-generator/pypy/rlib/jit.py Log: Implement the jit hook "can_never_inline", to be used on generators. Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_basic.py Wed Sep 8 16:11:29 2010 @@ -530,6 +530,32 @@ assert res == -2 self.check_loop_count(1) + def test_can_never_inline(self): + def can_never_inline(x): + return x > 50 + myjitdriver = JitDriver(greens = ['x'], reds = ['y'], + can_never_inline = can_never_inline) + @dont_look_inside + def marker(): + pass + def f(x, y): + while y >= 0: + myjitdriver.can_enter_jit(x=x, y=y) + myjitdriver.jit_merge_point(x=x, y=y) + x += 1 + if x == 4 or x == 61: + marker() + y -= x + return y + # + res = self.meta_interp(f, [3, 6], repeat=7) + assert res == 6 - 4 - 5 + self.check_history(call=0) # because the trace starts in the middle + # + res = self.meta_interp(f, [60, 84], repeat=7) + assert res == 84 - 61 - 62 + self.check_history(call=1) # because the trace starts immediately + def test_format(self): def f(n): return len("<%d>" % n) Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py Wed Sep 8 16:11:29 2010 @@ -166,6 +166,7 @@ _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None class FakeCell: dont_trace_here = False state = WarmEnterState(None, FakeJitDriverSD()) @@ -189,6 +190,7 @@ _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -209,9 +211,31 @@ _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.confirm_enter_jit(5, 42.5, 3) assert res is True + +def test_make_jitdriver_callbacks_5(): + def can_never_inline(x, y): + assert x == 5 + assert y == 42.5 + return True + CAN_NEVER_INLINE = lltype.Ptr(lltype.FuncType( + 
[lltype.Signed, lltype.Float], lltype.Bool)) + class FakeWarmRunnerDesc: + rtyper = None + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed, lltype.Float] + _get_printable_location_ptr = None + _confirm_enter_jit_ptr = None + _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) + _get_jitcell_at_ptr = None + + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + state.make_jitdriver_callbacks() + res = state.can_never_inline(5, 42.5) + assert res is True Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Wed Sep 8 16:11:29 2010 @@ -425,6 +425,8 @@ jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.confirm_enter_jit, annmodel.s_Bool, onlygreens=False) + jd._can_never_inline_ptr = self._make_hook_graph(jd, + annhelper, jd.jitdriver.can_never_inline, annmodel.s_Bool) annhelper.finish() def _make_hook_graph(self, jitdriver_sd, annhelper, func, Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Wed Sep 8 16:11:29 2010 @@ -493,6 +493,8 @@ jit_getter = self.make_jitcell_getter() def can_inline_greenargs(*greenargs): + if can_never_inline(*greenargs): + return False cell = jit_getter(False, *greenargs) if cell is not None and cell.dont_trace_here: return False @@ -546,3 +548,16 @@ confirm_enter_jit_ptr) return fn(*args) self.confirm_enter_jit = confirm_enter_jit + # + can_never_inline_ptr = self.jitdriver_sd._can_never_inline_ptr + if can_never_inline_ptr is None: + def can_never_inline(*greenargs): + return False + else: + rtyper = self.warmrunnerdesc.rtyper + # + def can_never_inline(*greenargs): + fn = support.maybe_on_top_of_llinterp(rtyper, + can_never_inline_ptr) + return fn(*greenargs) + self.can_never_inline = can_never_inline Modified: pypy/branch/jit-generator/pypy/rlib/jit.py ============================================================================== --- pypy/branch/jit-generator/pypy/rlib/jit.py (original) +++ pypy/branch/jit-generator/pypy/rlib/jit.py Wed Sep 8 16:11:29 2010 @@ -253,7 +253,8 @@ def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, - get_printable_location=None, confirm_enter_jit=None): + get_printable_location=None, confirm_enter_jit=None, + can_never_inline=None): if greens is not None: self.greens = greens if reds is not None: @@ -270,6 +271,7 @@ self.set_jitcell_at = set_jitcell_at self.get_printable_location = get_printable_location self.confirm_enter_jit = confirm_enter_jit + self.can_never_inline = can_never_inline def _freeze_(self): return True From arigo at codespeak.net Wed Sep 8 16:26:35 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 8 Sep 2010 16:26:35 +0200 (CEST) Subject: [pypy-svn] r76947 - pypy/branch/jit-generator/pypy/module/pypyjit Message-ID: <20100908142635.97536282BD6@codespeak.net> Author: arigo Date: Wed Sep 8 16:26:34 2010 New Revision: 76947 Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py Log: Try to use the hook. 
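The hook added in r76946 above receives the green arguments only; when it returns True for a given set of greens, can_inline_greenargs() in warmstate.py reports that code as not inlinable, so it is never inlined into a caller's trace. A minimal sketch of how a driver supplies the callback, reusing the names from test_can_never_inline above rather than introducing any new API; the actual pypyjit wiring is in the diff below.

    from pypy.rlib.jit import JitDriver

    def can_never_inline(x):
        # Called with the green arguments only.  Returning True means the
        # code identified by these greens must never be inlined into a
        # caller's trace.
        return x > 50

    myjitdriver = JitDriver(greens=['x'], reds=['y'],
                            can_never_inline=can_never_inline)

The diff below applies the same idea per code object: its can_never_inline(next_instr, bytecode) answers True whenever bytecode.co_flags has CO_GENERATOR set, so generator code objects are never inlined into the traces of their callers.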
Modified: pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py ============================================================================== --- pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/branch/jit-generator/pypy/module/pypyjit/interp_jit.py Wed Sep 8 16:26:34 2010 @@ -9,7 +9,7 @@ import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, Arguments -from pypy.interpreter.pycode import PyCode +from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame from opcode import opmap @@ -40,6 +40,9 @@ ec.profilefunc is None and ec.w_tracefunc is None) +def can_never_inline(next_instr, bytecode): + return (bytecode.co_flags & CO_GENERATOR) != 0 + class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] @@ -57,12 +60,10 @@ pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, - confirm_enter_jit = confirm_enter_jit) - -PyFrame__execute_generator_frame = PyFrame.execute_generator_frame + confirm_enter_jit = confirm_enter_jit, + can_never_inline = can_never_inline) class __extend__(PyFrame): - last_yield = -1 def dispatch(self, pycode, next_instr, ec): self = hint(self, access_directly=True) @@ -77,16 +78,7 @@ except ExitFrame: return self.popvalue() - def execute_generator_frame(self, w_inputvalue, operr=None): - self.last_yield = self.last_instr - return PyFrame__execute_generator_frame(self, w_inputvalue, operr) - def jump_absolute(self, jumpto, _, ec=None): - if jumpto <= self.last_yield: - # Here we are in a generator, closing the loop that did a YIELD. - # In that case, we should not consider this a loop at all. - self.last_yield = -1 - return jumpto if we_are_jitted(): self.last_instr = intmask(jumpto) ec.bytecode_trace(self) From fijal at codespeak.net Wed Sep 8 19:22:09 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 19:22:09 +0200 (CEST) Subject: [pypy-svn] r76951 - in pypy/branch/rsocket-improvements/pypy: module/_socket module/_socket/test rlib Message-ID: <20100908172209.A1006282BD6@codespeak.net> Author: fijal Date: Wed Sep 8 19:21:55 2010 New Revision: 76951 Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/interp_func.py pypy/branch/rsocket-improvements/pypy/module/_socket/interp_socket.py pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Log: Implement AF_PACKET support. 
part 1 - interface name Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/interp_func.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/_socket/interp_func.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/_socket/interp_func.py Wed Sep 8 19:21:55 2010 @@ -280,7 +280,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(space)]) + addr.as_object(-1, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) getaddrinfo.unwrap_spec = [ObjSpace, W_Root, W_Root, int, int, int, int] Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/_socket/interp_socket.py Wed Sep 8 19:21:55 2010 @@ -24,7 +24,7 @@ try: sock, addr = self.accept(W_RSocket) return space.newtuple([space.wrap(sock), - addr.as_object(space)]) + addr.as_object(sock.fd, space)]) except SocketError, e: raise converted_error(space, e) accept_w.unwrap_spec = ['self', ObjSpace] @@ -109,7 +109,7 @@ """ try: addr = self.getpeername() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getpeername_w.unwrap_spec = ['self', ObjSpace] @@ -122,7 +122,7 @@ """ try: addr = self.getsockname() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getsockname_w.unwrap_spec = ['self', ObjSpace] @@ -202,7 +202,7 @@ try: data, addr = self.recvfrom(buffersize, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(data), w_addr]) @@ -330,7 +330,7 @@ try: readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(readlgt), w_addr]) Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py Wed Sep 8 19:21:55 2010 @@ -2,6 +2,8 @@ import sys import py from pypy.tool.udir import udir +from pypy.rlib import rsocket +from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): mod.space = gettestobjspace(usemodules=['_socket', 'array']) @@ -221,21 +223,35 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info -def test_unknown_addr_as_object(): - from pypy.rlib import rsocket - from pypy.rpython.lltypesystem import lltype, rffi - +def test_unknown_addr_as_object(): c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') c_addr.c_sa_data[0] = 'c' rffi.setintfield(c_addr, 'c_sa_family', 15) # XXX what size to pass here? 
for the purpose of this test it has # to be short enough so we have some data, 1 sounds good enough # + sizeof USHORT - w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(-1, space) assert space.is_true(space.isinstance(w_obj, space.w_tuple)) assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' +def test_addr_raw_packet(): + if not hasattr(rsocket._c, 'sockaddr_ll'): + py.test.skip("posix specific test") + c_addr_ll = lltype.malloc(rsocket._c.sockaddr_ll, flavor='raw') + addrlen = rffi.sizeof(rsocket._c.sockaddr_ll) + c_addr = rffi.cast(lltype.Ptr(rsocket._c.sockaddr), c_addr_ll) + rffi.setintfield(c_addr_ll, 'c_sll_ifindex', 1) + rffi.setintfield(c_addr_ll, 'c_sll_protocol', 8) + rffi.setintfield(c_addr, 'c_sa_family', socket.AF_PACKET) + # fd needs to be somehow valid + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + fd = s.fileno() + w_obj = rsocket.make_address(c_addr, addrlen).as_object(fd, space) + assert space.is_true(space.eq(w_obj, space.newtuple([ + space.wrap('lo'), + space.w_None]))) + def test_getnameinfo(): host = "127.0.0.1" port = 25 Modified: pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py Wed Sep 8 19:21:55 2010 @@ -32,6 +32,9 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', + 'netpacket/packet.h', + 'sys/ioctl.h', + 'net/if.h', ) cond_includes = [('AF_NETLINK', 'linux/netlink.h')] libraries = () @@ -190,6 +193,8 @@ FD_CONNECT_BIT FD_CLOSE_BIT WSA_IO_PENDING WSA_IO_INCOMPLETE WSA_INVALID_HANDLE WSA_INVALID_PARAMETER WSA_NOT_ENOUGH_MEMORY WSA_OPERATION_ABORTED + +SIOCGIFNAME '''.split() for name in constant_names: @@ -309,6 +314,19 @@ [('fd', socketfd_type), ('events', rffi.SHORT), ('revents', rffi.SHORT)]) + + CConfig.sockaddr_ll = platform.Struct('struct sockaddr_ll', + [('sll_ifindex', rffi.INT), + ('sll_protocol', rffi.INT), + ('sll_pkttype', rffi.INT), + ('sll_hatype', rffi.INT), + ('sll_addr', rffi.CFixedArray(rffi.CHAR, 8)), + ('sll_halen', rffi.INT)], + ) + + CConfig.ifreq = platform.Struct('struct ifreq', [('ifr_ifindex', rffi.INT), + ('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))]) + if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( @@ -408,6 +426,8 @@ if _POSIX: nfds_t = cConfig.nfds_t pollfd = cConfig.pollfd + sockaddr_ll = cConfig.sockaddr_ll + ifreq = cConfig.ifreq if WIN32: WSAEVENT = cConfig.WSAEVENT WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS @@ -510,6 +530,8 @@ socketpair_t = rffi.CArray(socketfd_type) socketpair = external('socketpair', [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(socketpair_t)], rffi.INT) + ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], + rffi.INT) if _WIN32: ioctlsocket = external('ioctlsocket', Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Wed Sep 8 19:21:55 2010 @@ -6,8 +6,7 @@ # Known missing features: # -# - support for non-Linux platforms -# - address families other than AF_INET, AF_INET6, AF_UNIX +# - address families other than AF_INET, AF_INET6, AF_UNIX, AF_PACKET # - 
methods makefile(), # - SSL # @@ -109,7 +108,7 @@ """ keepalive_until_here(self) - def as_object(self, space): + def as_object(self, fd, space): """Convert the address to an app-level object.""" # If we don't know the address family, don't raise an # exception -- return it as a tuple. @@ -200,6 +199,31 @@ # ____________________________________________________________ +class PacketAddress(Address): + family = AF_PACKET + struct = _c.sockaddr_ll + maxlen = minlen = sizeof(struct) + + def get_ifname(self, fd): + a = self.lock(_c.sockaddr_ll) + p = lltype.malloc(_c.ifreq, flavor='raw') + rffi.setintfield(p, 'c_ifr_ifindex', + rffi.getintfield(a, 'c_sll_ifindex')) + if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): + ifname = rffi.charp2str(p.c_ifr_name) + else: + ifname = "" + lltype.free(p, flavor='raw') + self.unlock() + return ifname + + def get_protocol(self): + a = self.lock + + def as_object(self, fd, space): + return space.newtuple([space.wrap(self.get_ifname(fd)), + space.wrap(self.get_protocol())]) + class INETAddress(IPAddress): family = AF_INET struct = _c.sockaddr_in @@ -228,7 +252,7 @@ self.get_host() == other.get_host() and self.get_port() == other.get_port()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port())]) @@ -317,7 +341,7 @@ self.get_flowinfo() == other.get_flowinfo() and self.get_scope_id() == other.get_scope_id()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port()), space.wrap(self.get_flowinfo()), @@ -421,7 +445,7 @@ return (isinstance(other, UNIXAddress) and self.get_path() == other.get_path()) - def as_object(self, space): + def as_object(self, fd, space): return space.wrap(self.get_path()) def from_object(space, w_address): @@ -456,7 +480,7 @@ def __repr__(self): return '' % (self.get_pid(), self.get_groups()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_pid()), space.wrap(self.get_groups())]) @@ -613,7 +637,7 @@ # convert an Address into an app-level object def addr_as_object(self, space, address): - return address.as_object(space) + return address.as_object(self.fd, space) # convert an app-level object into an Address # based on the current socket's family From fijal at codespeak.net Wed Sep 8 19:47:51 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 19:47:51 +0200 (CEST) Subject: [pypy-svn] r76952 - in pypy/branch/rsocket-improvements/pypy: module/_socket/test rlib Message-ID: <20100908174751.49EEB282BD6@codespeak.net> Author: fijal Date: Wed Sep 8 19:47:49 2010 New Revision: 76952 Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Log: hopefully finish support for AF_PACKET Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py Wed Sep 8 19:47:49 2010 @@ -243,6 +243,12 @@ c_addr = rffi.cast(lltype.Ptr(rsocket._c.sockaddr), c_addr_ll) rffi.setintfield(c_addr_ll, 'c_sll_ifindex', 1) rffi.setintfield(c_addr_ll, 'c_sll_protocol', 8) + rffi.setintfield(c_addr_ll, 'c_sll_pkttype', 13) + rffi.setintfield(c_addr_ll, 'c_sll_hatype', 0) + 
rffi.setintfield(c_addr_ll, 'c_sll_halen', 3) + c_addr_ll.c_sll_addr[0] = 'a' + c_addr_ll.c_sll_addr[1] = 'b' + c_addr_ll.c_sll_addr[2] = 'c' rffi.setintfield(c_addr, 'c_sa_family', socket.AF_PACKET) # fd needs to be somehow valid s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -250,7 +256,11 @@ w_obj = rsocket.make_address(c_addr, addrlen).as_object(fd, space) assert space.is_true(space.eq(w_obj, space.newtuple([ space.wrap('lo'), - space.w_None]))) + space.wrap(2048), + space.wrap(13), + space.wrap(False), + space.wrap("abc"), + ]))) def test_getnameinfo(): host = "127.0.0.1" Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Wed Sep 8 19:47:49 2010 @@ -218,11 +218,36 @@ return ifname def get_protocol(self): - a = self.lock + a = self.lock(_c.sockaddr_ll) + res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) + self.unlock() + return res + + def get_pkttype(self): + a = self.lock(_c.sockaddr_ll) + res = rffi.getintfield(a, 'c_sll_pkttype') + self.unlock() + return res + + def get_hatype(self): + a = self.lock(_c.sockaddr_ll) + res = bool(rffi.getintfield(a, 'c_sll_hatype')) + self.unlock() + return res + + def get_addr(self): + a = self.lock(_c.sockaddr_ll) + lgt = rffi.getintfield(a, 'c_sll_halen') + res = rffi.charpsize2str(a.c_sll_addr, lgt) + self.unlock() + return res def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_ifname(fd)), - space.wrap(self.get_protocol())]) + space.wrap(self.get_protocol()), + space.wrap(self.get_pkttype()), + space.wrap(self.get_hatype()), + space.wrap(self.get_addr())]) class INETAddress(IPAddress): family = AF_INET From fijal at codespeak.net Wed Sep 8 21:00:46 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 21:00:46 +0200 (CEST) Subject: [pypy-svn] r76953 - pypy/branch/rsocket-improvements/pypy/rlib Message-ID: <20100908190046.1CDB1282BD6@codespeak.net> Author: fijal Date: Wed Sep 8 21:00:44 2010 New Revision: 76953 Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Log: Fix translation by handling conversion char* -> str by hand Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Wed Sep 8 21:00:44 2010 @@ -210,7 +210,13 @@ rffi.setintfield(p, 'c_ifr_ifindex', rffi.getintfield(a, 'c_sll_ifindex')) if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): - ifname = rffi.charp2str(p.c_ifr_name) + # eh, the iface name is a constant length array + i = 0 + d = [] + while p.c_ifr_name[i] != '\x00' and i < len(p.c_ifr_name): + d.append(p.c_ifr_name[i]) + i += 1 + ifname = ''.join(d) else: ifname = "" lltype.free(p, flavor='raw') @@ -238,7 +244,10 @@ def get_addr(self): a = self.lock(_c.sockaddr_ll) lgt = rffi.getintfield(a, 'c_sll_halen') - res = rffi.charpsize2str(a.c_sll_addr, lgt) + d = [] + for i in range(lgt): + d.append(a.c_sll_addr[i]) + res = "".join(d) self.unlock() return res From fijal at codespeak.net Wed Sep 8 21:15:59 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 8 Sep 2010 21:15:59 +0200 (CEST) Subject: [pypy-svn] r76954 - pypy/branch/rsocket-improvements/pypy/module/_socket/test Message-ID: 
From fijal at codespeak.net  Wed Sep 8 21:15:59 2010
From: fijal at codespeak.net (fijal at codespeak.net)
Date: Wed, 8 Sep 2010 21:15:59 +0200 (CEST)
Subject: [pypy-svn] r76954 - pypy/branch/rsocket-improvements/pypy/module/_socket/test
Message-ID: <20100908191559.C0858282BD6@codespeak.net>

Author: fijal
Date: Wed Sep 8 21:15:55 2010
New Revision: 76954

Modified:
   pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py
Log:
make test slightly more robust


Modified: pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py
==============================================================================
--- pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py (original)
+++ pypy/branch/rsocket-improvements/pypy/module/_socket/test/test_sock_app.py Wed Sep 8 21:15:55 2010
@@ -256,7 +256,7 @@
     w_obj = rsocket.make_address(c_addr, addrlen).as_object(fd, space)
     assert space.is_true(space.eq(w_obj, space.newtuple([
         space.wrap('lo'),
-        space.wrap(2048),
+        space.wrap(socket.ntohs(8)),
         space.wrap(13),
         space.wrap(False),
         space.wrap("abc"),

From benjamin at codespeak.net  Thu Sep 9 01:00:24 2010
From: benjamin at codespeak.net (benjamin at codespeak.net)
Date: Thu, 9 Sep 2010 01:00:24 +0200 (CEST)
Subject: [pypy-svn] r76955 - in pypy/branch/fast-forward: . lib-python lib-python/modified-2.5.2 lib-python/modified-2.5.2/ctypes lib-python/modified-2.5.2/test lib_pypy lib_pypy/_ctypes lib_pypy/pypy_test pypy/annotation pypy/annotation/test pypy/bin pypy/config pypy/doc pypy/doc/config pypy/interpreter pypy/interpreter/astcompiler pypy/interpreter/astcompiler/tools pypy/interpreter/test pypy/jit/backend pypy/jit/backend/llgraph pypy/jit/backend/llsupport pypy/jit/backend/llsupport/test pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/backend/x86/tool pypy/jit/codewriter pypy/jit/codewriter/test pypy/jit/metainterp pypy/jit/metainterp/doc pypy/jit/metainterp/test pypy/jit/tl pypy/jit/tool pypy/jit/tool/test pypy/module/__builtin__ pypy/module/__builtin__/test pypy/module/_ast pypy/module/_ast/test pypy/module/_codecs/test pypy/module/_demo pypy/module/_file pypy/module/_file/test pypy/module/_locale pypy/module/_rawffi/test pypy/module/_socket pypy/module/_socket/test pypy/module/_sre pypy/module/_sre/test pypy/module/_stackless pypy/module/array pypy/module/array/benchmark pypy/module/array/test pypy/module/cpyext pypy/module/cpyext/test pypy/module/fcntl/test pypy/module/marshal/test pypy/module/posix pypy/module/posix/test pypy/module/pypyjit pypy/module/pypyjit/test pypy/module/signal pypy/module/test_lib_pypy pypy/module/thread/test pypy/objspace/flow pypy/objspace/std pypy/objspace/std/test pypy/rlib pypy/rlib/rsre pypy/rlib/rsre/test pypy/rlib/test pypy/rpython pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/gctransform pypy/rpython/memory/test pypy/rpython/module pypy/rpython/module/test pypy/rpython/ootypesystem pypy/rpython/test pypy/rpython/tool pypy/tool pypy/tool/release pypy/tool/release/test pypy/translator pypy/translator/backendopt/test pypy/translator/c pypy/translator/c/gcc pypy/translator/c/gcc/test pypy/translator/c/gcc/test/elf64 pypy/translator/c/test pypy/translator/goal pypy/translator/goal/test2 pypy/translator/platform pypy/translator/platform/test
Message-ID: <20100908230024.76051282BDC@codespeak.net>

Author: benjamin
Date: Thu Sep 9 01:00:13 2010
New Revision: 76955

Added:
   pypy/branch/fast-forward/lib_pypy/array.py
      - copied unchanged from r76954, pypy/trunk/lib_pypy/array.py
   pypy/branch/fast-forward/pypy/doc/config/objspace.usemodules.array.txt   (props changed)
      - copied unchanged from r76954,
pypy/trunk/pypy/doc/config/objspace.usemodules.array.txt pypy/branch/fast-forward/pypy/jit/backend/x86/arch.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/arch.py pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/regloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/rx86.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_rx86.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/test/test_rx86.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py pypy/branch/fast-forward/pypy/jit/backend/x86/tool/instruction_encoding.sh - copied unchanged from r76954, pypy/trunk/pypy/jit/backend/x86/tool/instruction_encoding.sh pypy/branch/fast-forward/pypy/jit/tool/gen-trace-mode-keywords.py - copied unchanged from r76954, pypy/trunk/pypy/jit/tool/gen-trace-mode-keywords.py pypy/branch/fast-forward/pypy/jit/tool/pypytrace-mode.el - copied unchanged from r76954, pypy/trunk/pypy/jit/tool/pypytrace-mode.el pypy/branch/fast-forward/pypy/module/array/ (props changed) - copied from r76954, pypy/trunk/pypy/module/array/ pypy/branch/fast-forward/pypy/module/test_lib_pypy/test_msvcrt.py - copied unchanged from r76954, pypy/trunk/pypy/module/test_lib_pypy/test_msvcrt.py pypy/branch/fast-forward/pypy/rlib/rsre/ (props changed) - copied from r76954, pypy/trunk/pypy/rlib/rsre/ pypy/branch/fast-forward/pypy/rlib/rsre/__init__.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/__init__.py pypy/branch/fast-forward/pypy/rlib/rsre/rsre_char.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/rsre_char.py pypy/branch/fast-forward/pypy/rlib/rsre/rsre_core.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/rsre_core.py pypy/branch/fast-forward/pypy/rlib/rsre/test/ (props changed) - copied from r76954, pypy/trunk/pypy/rlib/rsre/test/ pypy/branch/fast-forward/pypy/rlib/rsre/test/__init__.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/test/__init__.py pypy/branch/fast-forward/pypy/rlib/rsre/test/targetrsre.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/test/targetrsre.py pypy/branch/fast-forward/pypy/rlib/rsre/test/test_search.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/test/test_search.py pypy/branch/fast-forward/pypy/rlib/rsre/test/test_zinterp.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/rsre/test/test_zinterp.py pypy/branch/fast-forward/pypy/rlib/test/test_rposix.py - copied unchanged from r76954, pypy/trunk/pypy/rlib/test/test_rposix.py pypy/branch/fast-forward/pypy/rpython/module/ll_win32file.py - copied unchanged from r76954, pypy/trunk/pypy/rpython/module/ll_win32file.py pypy/branch/fast-forward/pypy/translator/c/gcc/test/elf64/ - copied from r76954, pypy/trunk/pypy/translator/c/gcc/test/elf64/ Removed: pypy/branch/fast-forward/lib-python/modified-2.5.2/test/test_re.py pypy/branch/fast-forward/lib_pypy/_sre.py pypy/branch/fast-forward/lib_pypy/greenlet.py 
pypy/branch/fast-forward/pypy/jit/backend/x86/ri386.py pypy/branch/fast-forward/pypy/jit/backend/x86/ri386setup.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ri386.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ri386_auto_encoding.py pypy/branch/fast-forward/pypy/jit/metainterp/doc/ pypy/branch/fast-forward/pypy/module/_sre/app_sre.py pypy/branch/fast-forward/pypy/module/pypyjit/test/test_can_inline.py pypy/branch/fast-forward/pypy/module/test_lib_pypy/test_array.py Modified: pypy/branch/fast-forward/ (props changed) pypy/branch/fast-forward/lib-python/conftest.py pypy/branch/fast-forward/lib-python/modified-2.5.2/ctypes/__init__.py pypy/branch/fast-forward/lib-python/modified-2.5.2/site.py pypy/branch/fast-forward/lib-python/modified-2.5.2/test/test_array.py pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py pypy/branch/fast-forward/lib_pypy/_ctypes/array.py pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py pypy/branch/fast-forward/lib_pypy/_ctypes/function.py pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py pypy/branch/fast-forward/lib_pypy/datetime.py pypy/branch/fast-forward/lib_pypy/msvcrt.py pypy/branch/fast-forward/lib_pypy/pypy_test/test_coroutine.py pypy/branch/fast-forward/lib_pypy/pypy_test/test_ctypes_support.py pypy/branch/fast-forward/lib_pypy/pypy_test/test_datetime.py pypy/branch/fast-forward/lib_pypy/stackless.py pypy/branch/fast-forward/pypy/annotation/binaryop.py pypy/branch/fast-forward/pypy/annotation/bookkeeper.py pypy/branch/fast-forward/pypy/annotation/builtin.py pypy/branch/fast-forward/pypy/annotation/classdef.py pypy/branch/fast-forward/pypy/annotation/model.py pypy/branch/fast-forward/pypy/annotation/specialize.py pypy/branch/fast-forward/pypy/annotation/test/test_annrpython.py pypy/branch/fast-forward/pypy/bin/py.py pypy/branch/fast-forward/pypy/config/pypyoption.py pypy/branch/fast-forward/pypy/config/translationoption.py pypy/branch/fast-forward/pypy/doc/faq.txt pypy/branch/fast-forward/pypy/interpreter/argument.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/assemble.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/ast.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/consts.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/symtable.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/tools/asdl_py.py pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py pypy/branch/fast-forward/pypy/interpreter/error.py pypy/branch/fast-forward/pypy/interpreter/gateway.py pypy/branch/fast-forward/pypy/interpreter/generator.py pypy/branch/fast-forward/pypy/interpreter/pycode.py pypy/branch/fast-forward/pypy/interpreter/pyframe.py pypy/branch/fast-forward/pypy/interpreter/pyopcode.py pypy/branch/fast-forward/pypy/interpreter/test/test_code.py pypy/branch/fast-forward/pypy/interpreter/test/test_compiler.py pypy/branch/fast-forward/pypy/interpreter/test/test_gateway.py pypy/branch/fast-forward/pypy/interpreter/test/test_generator.py pypy/branch/fast-forward/pypy/interpreter/test/test_zpy.py pypy/branch/fast-forward/pypy/jit/backend/detect_cpu.py pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/descr.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_descr.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py 
pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_regalloc.py pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py pypy/branch/fast-forward/pypy/jit/backend/x86/jump.py pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_assembler.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_basic.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_gc_integration.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_jump.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc2.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_symbolic_x86.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zll_random.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py pypy/branch/fast-forward/pypy/jit/backend/x86/tool/viewcode.py pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py pypy/branch/fast-forward/pypy/jit/codewriter/jitcode.py pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py pypy/branch/fast-forward/pypy/jit/codewriter/test/test_flatten.py pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py pypy/branch/fast-forward/pypy/jit/metainterp/executor.py pypy/branch/fast-forward/pypy/jit/metainterp/history.py pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt.py pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_executor.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_immutable.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_jitdriver.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_spec.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualizable.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmspot.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py pypy/branch/fast-forward/pypy/jit/tl/pypyjit.py pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py pypy/branch/fast-forward/pypy/jit/tool/test/test_traceviewer.py pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py pypy/branch/fast-forward/pypy/module/__builtin__/compiling.py pypy/branch/fast-forward/pypy/module/__builtin__/functional.py pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py pypy/branch/fast-forward/pypy/module/__builtin__/test/test_buffer.py pypy/branch/fast-forward/pypy/module/__builtin__/test/test_classobj.py pypy/branch/fast-forward/pypy/module/_ast/__init__.py 
pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py pypy/branch/fast-forward/pypy/module/_demo/demo.py pypy/branch/fast-forward/pypy/module/_file/interp_file.py pypy/branch/fast-forward/pypy/module/_file/test/test_file.py pypy/branch/fast-forward/pypy/module/_file/test/test_file_extra.py pypy/branch/fast-forward/pypy/module/_locale/__init__.py pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py pypy/branch/fast-forward/pypy/module/_sre/__init__.py pypy/branch/fast-forward/pypy/module/_sre/interp_sre.py pypy/branch/fast-forward/pypy/module/_sre/test/test_app_sre.py pypy/branch/fast-forward/pypy/module/_stackless/interp_coroutine.py pypy/branch/fast-forward/pypy/module/array/benchmark/ (props changed) pypy/branch/fast-forward/pypy/module/array/test/ (props changed) pypy/branch/fast-forward/pypy/module/cpyext/api.py pypy/branch/fast-forward/pypy/module/cpyext/methodobject.py pypy/branch/fast-forward/pypy/module/cpyext/stubs.py pypy/branch/fast-forward/pypy/module/cpyext/test/test_unicodeobject.py pypy/branch/fast-forward/pypy/module/cpyext/unicodeobject.py pypy/branch/fast-forward/pypy/module/fcntl/test/test_fcntl.py pypy/branch/fast-forward/pypy/module/marshal/test/test_marshalimpl.py pypy/branch/fast-forward/pypy/module/posix/interp_posix.py pypy/branch/fast-forward/pypy/module/posix/test/test_posix2.py pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py pypy/branch/fast-forward/pypy/module/pypyjit/policy.py pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/fast-forward/pypy/module/signal/interp_signal.py pypy/branch/fast-forward/pypy/module/thread/test/test_gil.py pypy/branch/fast-forward/pypy/objspace/flow/specialcase.py pypy/branch/fast-forward/pypy/objspace/std/callmethod.py pypy/branch/fast-forward/pypy/objspace/std/intobject.py pypy/branch/fast-forward/pypy/objspace/std/itertype.py pypy/branch/fast-forward/pypy/objspace/std/model.py pypy/branch/fast-forward/pypy/objspace/std/objspace.py pypy/branch/fast-forward/pypy/objspace/std/test/test_callmethod.py pypy/branch/fast-forward/pypy/objspace/std/tupleobject.py pypy/branch/fast-forward/pypy/rlib/debug.py pypy/branch/fast-forward/pypy/rlib/jit.py pypy/branch/fast-forward/pypy/rlib/objectmodel.py pypy/branch/fast-forward/pypy/rlib/rarithmetic.py pypy/branch/fast-forward/pypy/rlib/rlocale.py pypy/branch/fast-forward/pypy/rlib/rmmap.py pypy/branch/fast-forward/pypy/rlib/rposix.py pypy/branch/fast-forward/pypy/rlib/rsha.py pypy/branch/fast-forward/pypy/rlib/rsocket.py pypy/branch/fast-forward/pypy/rlib/rweakref.py pypy/branch/fast-forward/pypy/rlib/rwin32.py pypy/branch/fast-forward/pypy/rlib/rzipfile.py pypy/branch/fast-forward/pypy/rlib/streamio.py pypy/branch/fast-forward/pypy/rlib/test/test_jit.py pypy/branch/fast-forward/pypy/rlib/test/test_objectmodel.py pypy/branch/fast-forward/pypy/rpython/extfunc.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/llgroup.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py 
pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_llgroup.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lloperation.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lltype.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_rffi.py pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/fast-forward/pypy/rpython/memory/gctransform/asmgcroot.py pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_gctypelayout.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/fast-forward/pypy/rpython/module/ll_os.py pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py pypy/branch/fast-forward/pypy/rpython/module/ll_time.py pypy/branch/fast-forward/pypy/rpython/module/test/test_ll_os_stat.py pypy/branch/fast-forward/pypy/rpython/ootypesystem/ootype.py pypy/branch/fast-forward/pypy/rpython/ootypesystem/rclass.py pypy/branch/fast-forward/pypy/rpython/rbuiltin.py pypy/branch/fast-forward/pypy/rpython/rclass.py pypy/branch/fast-forward/pypy/rpython/rstr.py pypy/branch/fast-forward/pypy/rpython/test/test_extfunc.py pypy/branch/fast-forward/pypy/rpython/test/test_rclass.py pypy/branch/fast-forward/pypy/rpython/test/test_rint.py pypy/branch/fast-forward/pypy/rpython/tool/rfficache.py pypy/branch/fast-forward/pypy/tool/release/package.py pypy/branch/fast-forward/pypy/tool/release/test/test_package.py pypy/branch/fast-forward/pypy/tool/runsubprocess.py pypy/branch/fast-forward/pypy/translator/backendopt/test/test_constfold.py pypy/branch/fast-forward/pypy/translator/c/database.py pypy/branch/fast-forward/pypy/translator/c/gc.py pypy/branch/fast-forward/pypy/translator/c/gcc/instruction.py pypy/branch/fast-forward/pypy/translator/c/gcc/test/conftest.py pypy/branch/fast-forward/pypy/translator/c/gcc/test/test_trackgcroot.py pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py pypy/branch/fast-forward/pypy/translator/c/genc.py pypy/branch/fast-forward/pypy/translator/c/node.py pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py pypy/branch/fast-forward/pypy/translator/c/test/test_standalone.py pypy/branch/fast-forward/pypy/translator/exceptiontransform.py pypy/branch/fast-forward/pypy/translator/goal/app_main.py pypy/branch/fast-forward/pypy/translator/goal/test2/test_app_main.py pypy/branch/fast-forward/pypy/translator/goal/translate.py pypy/branch/fast-forward/pypy/translator/platform/__init__.py pypy/branch/fast-forward/pypy/translator/platform/darwin.py pypy/branch/fast-forward/pypy/translator/platform/freebsd7.py pypy/branch/fast-forward/pypy/translator/platform/linux.py pypy/branch/fast-forward/pypy/translator/platform/maemo.py pypy/branch/fast-forward/pypy/translator/platform/posix.py pypy/branch/fast-forward/pypy/translator/platform/test/test_platform.py pypy/branch/fast-forward/pypy/translator/platform/windows.py Log: merge from trunk Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Thu Sep 9 01:00:13 2010 @@ -137,7 
+137,7 @@ RegrTest('test_ast.py', core=True), RegrTest('test_anydbm.py'), RegrTest('test_applesingle.py', skip=True), - RegrTest('test_array.py', core=True, usemodules='struct'), + RegrTest('test_array.py', core=True, usemodules='struct array'), RegrTest('test_ascii_formatd.py'), RegrTest('test_asynchat.py', usemodules='thread'), RegrTest('test_asyncore.py'), @@ -514,11 +514,7 @@ RegrTest('test_coding.py'), RegrTest('test_complex_args.py'), RegrTest('test_contextlib.py', usemodules="thread"), - # we skip test ctypes, since we adapted it massively in order - # to test what we want to support. There are real failures, - # but it's about missing features that we don't want to support - # now - RegrTest('test_ctypes.py', skip="we have a replacement"), + RegrTest('test_ctypes.py', usemodules="_rawffi"), RegrTest('test_defaultdict.py'), RegrTest('test_email_renamed.py'), RegrTest('test_exception_variations.py'), Modified: pypy/branch/fast-forward/lib-python/modified-2.5.2/ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.5.2/ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.5.2/ctypes/__init__.py Thu Sep 9 01:00:13 2010 @@ -471,7 +471,7 @@ # functions -from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr +from _ctypes import _memmove_addr, _memset_addr, _cast_addr ## void *memmove(void *, const void *, size_t); memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr) @@ -490,24 +490,34 @@ def cast(obj, typ): return _cast(obj, obj, typ) -_string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr) +try: + from _ctypes import _string_at_addr +except ImportError: + from _ctypes import _string_at +else: + _string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr) + def string_at(ptr, size=-1): """string_at(addr[, size]) -> string Return the string at addr.""" return _string_at(ptr, size) +def wstring_at(ptr, size=-1): + """wstring_at(addr[, size]) -> string + + Return the string at addr.""" + return _wstring_at(ptr, size) + try: from _ctypes import _wstring_at_addr except ImportError: - pass + try: + from _ctypes import _wstring_at + except ImportError: + del wstring_at else: _wstring_at = CFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr) - def wstring_at(ptr, size=-1): - """wstring_at(addr[, size]) -> string - - Return the string at addr.""" - return _wstring_at(ptr, size) if _os.name in ("nt", "ce"): # COM stuff Modified: pypy/branch/fast-forward/lib-python/modified-2.5.2/site.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.5.2/site.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.5.2/site.py Thu Sep 9 01:00:13 2010 @@ -175,7 +175,7 @@ def addsitepackages(known_paths): """Add site-packages to sys.path, in a PyPy-specific way.""" - if hasattr(sys, 'pypy_version_info'): + if hasattr(sys, 'pypy_version_info') and hasattr(sys, 'prefix'): from distutils.sysconfig import get_python_lib sitedir = get_python_lib(standard_lib=False) if os.path.isdir(sitedir): Modified: pypy/branch/fast-forward/lib-python/modified-2.5.2/test/test_array.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.5.2/test/test_array.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.5.2/test/test_array.py Thu Sep 9 01:00:13 2010 @@ -269,9 +269,11 @@ ) 
b = array.array(self.badtypecode()) - self.assertRaises(TypeError, a.__add__, b) + #self.assertRaises(TypeError, a.__add__, b) + #self.assertRaises(TypeError, a.__add__, "bad") + self.assertRaises(TypeError, lambda i, j: i + j, a, b) + self.assertRaises(TypeError, lambda i, j: i + j, a, "bad") - self.assertRaises(TypeError, a.__add__, "bad") def test_iadd(self): a = array.array(self.typecode, self.example[::-1]) @@ -284,9 +286,12 @@ ) b = array.array(self.badtypecode()) - self.assertRaises(TypeError, a.__add__, b) - - self.assertRaises(TypeError, a.__iadd__, "bad") + #self.assertRaises(TypeError, a.__add__, b) + #self.assertRaises(TypeError, a.__iadd__, "bad") + def f(i, j): + i += j + self.assertRaises(TypeError, f, a, b) + self.assertRaises(TypeError, f, a, "bad") def test_mul(self): a = 5*array.array(self.typecode, self.example) @@ -313,7 +318,8 @@ array.array(self.typecode) ) - self.assertRaises(TypeError, a.__mul__, "bad") + #self.assertRaises(TypeError, a.__mul__, "bad") + self.assertRaises(TypeError, lambda i, j: i * j, a, "bad") def test_imul(self): a = array.array(self.typecode, self.example) @@ -342,7 +348,10 @@ a *= -1 self.assertEqual(a, array.array(self.typecode)) - self.assertRaises(TypeError, a.__imul__, "bad") + #self.assertRaises(TypeError, a.__imul__, "bad") + def f(i, j): + i *= j + self.assertRaises(TypeError, f, a, "bad") def test_getitem(self): a = array.array(self.typecode, self.example) Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py Thu Sep 9 01:00:13 2010 @@ -7,8 +7,8 @@ from _ctypes.dll import dlopen from _ctypes.structure import Structure from _ctypes.array import Array -from _ctypes.builtin import _memmove_addr, _string_at_addr, _memset_addr,\ - set_conversion_mode, _wstring_at_addr +from _ctypes.builtin import _memmove_addr, _string_at, _memset_addr,\ + set_conversion_mode, _wstring_at from _ctypes.union import Union import os as _os Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/array.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/array.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/array.py Thu Sep 9 01:00:13 2010 @@ -4,7 +4,6 @@ from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import CArgObject -from _ctypes.builtin import _string_at_addr, _wstring_at_addr def _create_unicode(buffer, maxlength): res = [] @@ -76,7 +75,7 @@ def _CData_output(self, resarray, base=None, index=-1): # this seems to be a string if we're array of char, surprise! 
- from ctypes import c_char, c_wchar, c_char_p, c_wchar_p + from ctypes import c_char, c_wchar if self._type_ is c_char: return _rawffi.charp2string(resarray.buffer, self._length_) if self._type_ is c_wchar: Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py Thu Sep 9 01:00:13 2010 @@ -8,10 +8,10 @@ _memmove_addr = _rawffi.get_libc().getaddressindll('memmove') _memset_addr = _rawffi.get_libc().getaddressindll('memset') -def _string_at_addr(addr, lgt): +def _string_at(addr, lgt): # address here can be almost anything import ctypes - arg = ctypes.c_char_p._CData_value(addr) + arg = ctypes.c_void_p._CData_value(addr) return _rawffi.charp2rawstring(arg, lgt) def set_conversion_mode(encoding, errors): @@ -20,9 +20,9 @@ ConvMode.encoding = encoding return old_cm -def _wstring_at_addr(addr, lgt): +def _wstring_at(addr, lgt): import ctypes - arg = ctypes.c_wchar_p._CData_value(addr) + arg = ctypes.c_void_p._CData_value(addr) # XXX purely applevel if lgt == -1: lgt = sys.maxint Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/function.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/function.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/function.py Thu Sep 9 01:00:13 2010 @@ -60,7 +60,6 @@ return self._restype_ def _setrestype(self, restype): self._ptr = None - from ctypes import c_char_p if restype is int: from ctypes import c_int restype = c_int @@ -214,9 +213,7 @@ @staticmethod def _guess_argtypes(args): - from _ctypes import _CData from ctypes import c_char_p, c_wchar_p, c_void_p, c_int - from ctypes import Array, Structure res = [] for arg in args: if hasattr(arg, '_as_parameter_'): Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py Thu Sep 9 01:00:13 2010 @@ -57,7 +57,6 @@ pyobj_container = GlobalPyobjContainer() def generic_xxx_p_from_param(cls, value): - from _ctypes import Array, _Pointer if value is None: return cls(None) if isinstance(value, basestring): @@ -86,6 +85,8 @@ return value if isinstance(value, _Pointer): return cls.from_address(value._buffer.buffer) + if isinstance(value, (int, long)): + return cls(value) FROM_PARAM_BY_TYPE = { 'z': from_param_char_p, @@ -117,8 +118,6 @@ result._ffiarray = ffiarray if tp == 'z': # c_char_p - from _ctypes import Array, _Pointer - def _getvalue(self): addr = self._buffer[0] if addr == 0: @@ -141,13 +140,13 @@ result.value = property(_getvalue, _setvalue) elif tp == 'Z': # c_wchar_p - from _ctypes import Array, _Pointer, _wstring_at_addr + from _ctypes import _wstring_at def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: - return _wstring_at_addr(addr, -1) + return _wstring_at(addr, -1) def _setvalue(self, value): if isinstance(value, basestring): @@ -216,14 +215,14 @@ SysAllocStringLen = windll.oleaut32.SysAllocStringLen SysStringLen = windll.oleaut32.SysStringLen SysFreeString = windll.oleaut32.SysFreeString - from _ctypes import _wstring_at_addr + from _ctypes import _wstring_at def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: size = SysStringLen(addr) - return 
_wstring_at_addr(addr, size) + return _wstring_at(addr, size) def _setvalue(self, value): if isinstance(value, basestring): @@ -254,18 +253,21 @@ from_address = cdata_from_address def from_param(self, value): + if isinstance(value, self): + return value + from_param_f = FROM_PARAM_BY_TYPE.get(self._type_) if from_param_f: res = from_param_f(self, value) if res is not None: return res - - if isinstance(value, self): - return value - try: - return self(value) - except (TypeError, ValueError): - return super(SimpleType, self).from_param(value) + else: + try: + return self(value) + except (TypeError, ValueError): + pass + + return super(SimpleType, self).from_param(value) def _CData_output(self, resbuffer, base=None, index=-1): output = super(SimpleType, self)._CData_output(resbuffer, base, index) Modified: pypy/branch/fast-forward/lib_pypy/datetime.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/datetime.py (original) +++ pypy/branch/fast-forward/lib_pypy/datetime.py Thu Sep 9 01:00:13 2010 @@ -1412,7 +1412,7 @@ def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." - if 1 - (t % 1.0) < 0.000001: + if 1 - (t % 1.0) < 0.0000005: t = float(int(t)) + 1 if t < 0: t -= 1 Modified: pypy/branch/fast-forward/lib_pypy/msvcrt.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/msvcrt.py (original) +++ pypy/branch/fast-forward/lib_pypy/msvcrt.py Thu Sep 9 01:00:13 2010 @@ -5,9 +5,12 @@ """ # XXX incomplete: implemented only functions needed by subprocess.py +# PAC: 2010/08 added MS locking for Whoosh import ctypes from ctypes_support import standard_c_lib as _c +from ctypes_support import get_errno +import errno try: open_osfhandle = _c._open_osfhandle @@ -25,4 +28,17 @@ setmode.argtypes = [ctypes.c_int, ctypes.c_int] setmode.restype = ctypes.c_int +LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5) + +_locking = _c._locking +_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int] +_locking.restype = ctypes.c_int + +def locking(fd, mode, nbytes): + '''lock or unlock a number of bytes in a file.''' + rv = _locking(fd, mode, nbytes) + if rv != 0: + e = get_errno() + raise IOError(e, errno.errorcode[e]) + del ctypes Modified: pypy/branch/fast-forward/lib_pypy/pypy_test/test_coroutine.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/pypy_test/test_coroutine.py (original) +++ pypy/branch/fast-forward/lib_pypy/pypy_test/test_coroutine.py Thu Sep 9 01:00:13 2010 @@ -2,7 +2,7 @@ from py.test import skip, raises try: - from ..stackless import coroutine + from ..stackless import coroutine, CoroutineExit except ImportError, e: skip('cannot import stackless: %s' % (e,)) Modified: pypy/branch/fast-forward/lib_pypy/pypy_test/test_ctypes_support.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/pypy_test/test_ctypes_support.py (original) +++ pypy/branch/fast-forward/lib_pypy/pypy_test/test_ctypes_support.py Thu Sep 9 01:00:13 2010 @@ -20,3 +20,14 @@ assert get_errno() != 0 set_errno(0) assert get_errno() == 0 + +def test_argument_conversion_and_checks(): + import ctypes + libc = ctypes.cdll.LoadLibrary("libc.so.6") + libc.strlen.argtypes = ctypes.c_char_p, + libc.strlen.restype = ctypes.c_size_t + assert libc.strlen("eggs") == 4 + + # Should raise ArgumentError, not segfault + 
py.test.raises(ctypes.ArgumentError, libc.strlen, False) + Modified: pypy/branch/fast-forward/lib_pypy/pypy_test/test_datetime.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/pypy_test/test_datetime.py (original) +++ pypy/branch/fast-forward/lib_pypy/pypy_test/test_datetime.py Thu Sep 9 01:00:13 2010 @@ -15,4 +15,18 @@ expected = datetime.datetime(*(time.strptime(string, format)[0:6])) got = datetime.datetime.strptime(string, format) assert expected == got + +def test_datetime_rounding(): + b = 0.0000001 + a = 0.9999994 + + assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999 + assert datetime.datetime.utcfromtimestamp(a).second == 0 + a += b + assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999 + assert datetime.datetime.utcfromtimestamp(a).second == 0 + a += b + assert datetime.datetime.utcfromtimestamp(a).microsecond == 0 + assert datetime.datetime.utcfromtimestamp(a).second == 1 + Modified: pypy/branch/fast-forward/lib_pypy/stackless.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/stackless.py (original) +++ pypy/branch/fast-forward/lib_pypy/stackless.py Thu Sep 9 01:00:13 2010 @@ -14,9 +14,13 @@ import traceback import sys try: + # If _stackless can be imported then TaskletExit and CoroutineExit are + # automatically added to the builtins. from _stackless import coroutine, greenlet except ImportError: # we are running from CPython - from greenlet import greenlet + from greenlet import greenlet, GreenletExit + TaskletExit = CoroutineExit = GreenletExit + del GreenletExit try: from functools import partial except ImportError: # we are not running python 2.5 Modified: pypy/branch/fast-forward/pypy/annotation/binaryop.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/binaryop.py (original) +++ pypy/branch/fast-forward/pypy/annotation/binaryop.py Thu Sep 9 01:00:13 2010 @@ -924,10 +924,10 @@ class __extend__(pairtype(SomeAddress, SomeAddress)): def union((s_addr1, s_addr2)): - return SomeAddress(is_null=s_addr1.is_null and s_addr2.is_null) + return SomeAddress() def sub((s_addr1, s_addr2)): - if s_addr1.is_null and s_addr2.is_null: + if s_addr1.is_null_address() and s_addr2.is_null_address(): return getbookkeeper().immutablevalue(0) return SomeInteger() @@ -953,10 +953,10 @@ class __extend__(pairtype(SomeAddress, SomeInteger)): def add((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() def sub((s_addr, s_int)): - return SomeAddress(is_null=False) + return SomeAddress() class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): # need to override this specifically to hide the 'raise UnionError' Modified: pypy/branch/fast-forward/pypy/annotation/bookkeeper.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/bookkeeper.py (original) +++ pypy/branch/fast-forward/pypy/annotation/bookkeeper.py Thu Sep 9 01:00:13 2010 @@ -338,8 +338,12 @@ result = SomeBool() elif tp is int: result = SomeInteger(nonneg = x>=0) - elif tp is long and 0 <= x <= (sys.maxint * 2 + 1): - result = SomeInteger(unsigned = True) + elif tp is long: + if -sys.maxint-1 <= x <= sys.maxint: + x = int(x) + result = SomeInteger(nonneg = x>=0) + else: + raise Exception("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses if 
len(x) == 1: result = SomeChar() @@ -431,7 +435,7 @@ elif isinstance(x, lltype._ptr): result = SomePtr(lltype.typeOf(x)) elif isinstance(x, llmemory.fakeaddress): - result = SomeAddress(is_null=not x) + result = SomeAddress() elif isinstance(x, ootype._static_meth): result = SomeOOStaticMeth(ootype.typeOf(x)) elif isinstance(x, ootype._class): Modified: pypy/branch/fast-forward/pypy/annotation/builtin.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/builtin.py (original) +++ pypy/branch/fast-forward/pypy/annotation/builtin.py Thu Sep 9 01:00:13 2010 @@ -92,6 +92,8 @@ return s_obj.is_true() def builtin_int(s_obj, s_base=None): + if isinstance(s_obj, SomeInteger): + assert not s_obj.unsigned, "instead of int(r_uint(x)), use intmask(r_uint(x))" assert (s_base is None or isinstance(s_base, SomeInteger) and s_obj.knowntype == str), "only int(v|string) or int(string,int) expected" if s_base is not None: @@ -694,18 +696,14 @@ def raw_free(s_addr): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null def raw_memclear(s_addr, s_int): assert isinstance(s_addr, SomeAddress) - assert not s_addr.is_null assert isinstance(s_int, SomeInteger) def raw_memcopy(s_addr1, s_addr2, s_int): assert isinstance(s_addr1, SomeAddress) - assert not s_addr1.is_null assert isinstance(s_addr2, SomeAddress) - assert not s_addr2.is_null assert isinstance(s_int, SomeInteger) #XXX add noneg...? BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc Modified: pypy/branch/fast-forward/pypy/annotation/classdef.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/classdef.py (original) +++ pypy/branch/fast-forward/pypy/annotation/classdef.py Thu Sep 9 01:00:13 2010 @@ -276,6 +276,8 @@ # create the Attribute and do the generalization asked for newattr = Attribute(attr, self.bookkeeper) if s_value: + if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): + import pdb; pdb.set_trace() newattr.s_value = s_value # keep all subattributes' values Modified: pypy/branch/fast-forward/pypy/annotation/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/model.py (original) +++ pypy/branch/fast-forward/pypy/annotation/model.py Thu Sep 9 01:00:13 2010 @@ -501,12 +501,13 @@ class SomeAddress(SomeObject): immutable = True - def __init__(self, is_null=False): - self.is_null = is_null def can_be_none(self): return False + def is_null_address(self): + return self.is_immutable_constant() and not self.const + # The following class is used to annotate the intermediate value that # appears in expressions of the form: # addr.signed[offset] and addr.signed[offset] = value Modified: pypy/branch/fast-forward/pypy/annotation/specialize.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/specialize.py (original) +++ pypy/branch/fast-forward/pypy/annotation/specialize.py Thu Sep 9 01:00:13 2010 @@ -354,6 +354,12 @@ def specialize_argtype(funcdesc, args_s, *argindices): key = tuple([args_s[i].knowntype for i in argindices]) + for cls in key: + try: + assert '_must_specialize_' not in cls.classdesc.pyobj.__dict__, ( + "%s has the tag _must_specialize_" % (cls,)) + except AttributeError: + pass return maybe_star_args(funcdesc, key, args_s) def specialize_arglistitemtype(funcdesc, args_s, i): Modified: 
pypy/branch/fast-forward/pypy/annotation/test/test_annrpython.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/test/test_annrpython.py (original) +++ pypy/branch/fast-forward/pypy/annotation/test/test_annrpython.py Thu Sep 9 01:00:13 2010 @@ -767,7 +767,6 @@ assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError) # KeyError ignored because l is a list def test_overrides(self): - import sys excs = [] def record_exc(e): """NOT_RPYTHON""" @@ -869,8 +868,27 @@ def f(): return large_constant a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, []) + # if you want to get a r_uint, you have to be explicit about it + + def test_prebuilt_long_that_is_not_too_long(self): + small_constant = 12L + def f(): + return small_constant + a = self.RPythonAnnotator() s = a.build_types(f, []) - assert s.knowntype == r_uint + assert s.const == 12 + assert s.nonneg + assert not s.unsigned + # + small_constant = -23L + def f(): + return small_constant + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == -23 + assert not s.nonneg + assert not s.unsigned def test_pbc_getattr(self): class C: @@ -1386,7 +1404,6 @@ assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) def test_sys_attrs(self): - import sys def f(): return sys.argv[0] a = self.RPythonAnnotator() Modified: pypy/branch/fast-forward/pypy/bin/py.py ============================================================================== --- pypy/branch/fast-forward/pypy/bin/py.py (original) +++ pypy/branch/fast-forward/pypy/bin/py.py Thu Sep 9 01:00:13 2010 @@ -76,6 +76,8 @@ config.objspace.suggest(allworkingmodules=False) if config.objspace.allworkingmodules: pypyoption.enable_allworkingmodules(config) + if config.objspace.usemodules.thread: + config.translation.thread = True # create the object space Modified: pypy/branch/fast-forward/pypy/config/pypyoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/pypyoption.py (original) +++ pypy/branch/fast-forward/pypy/config/pypyoption.py Thu Sep 9 01:00:13 2010 @@ -30,7 +30,7 @@ "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "struct", "md5", "sha", "bz2", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "cpyext"] + "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array"] )) working_oo_modules = default_modules.copy() Modified: pypy/branch/fast-forward/pypy/config/translationoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/translationoption.py (original) +++ pypy/branch/fast-forward/pypy/config/translationoption.py Thu Sep 9 01:00:13 2010 @@ -342,6 +342,9 @@ 'jit': 'hybrid extraopts jit', } +def final_check_config(config): + pass + def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. The optimizations depend on the selected level and possibly on the backend. 
Modified: pypy/branch/fast-forward/pypy/doc/faq.txt ============================================================================== --- pypy/branch/fast-forward/pypy/doc/faq.txt (original) +++ pypy/branch/fast-forward/pypy/doc/faq.txt Thu Sep 9 01:00:13 2010 @@ -47,8 +47,8 @@ There is also an experimental support for CPython extension modules, so they'll run without change (from current observation, rather with little -change) on trunk. It has not been released yet, although it should be a major -point of the next pypy release. +change) on trunk. It has been a part of 1.3 release, but support is still +in alpha phase. .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython_differences`: cpython_differences.html @@ -373,7 +373,7 @@ -------------------------------------------- No. PyPy always runs your code in its own interpreter, which is a -full and compliant Python 2.4 interpreter. RPython_ is only the +full and compliant Python 2.5 interpreter. RPython_ is only the language in which parts of PyPy itself are written and extension modules for it. The answer to whether something needs to be written as an extension module, apart from the "gluing to external libraries" reason, will Modified: pypy/branch/fast-forward/pypy/interpreter/argument.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/argument.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/argument.py Thu Sep 9 01:00:13 2010 @@ -52,11 +52,15 @@ self.argnames, self.varargname, self.kwargname) def __eq__(self, other): + if not isinstance(other, Signature): + return NotImplemented return (self.argnames == other.argnames and self.varargname == other.varargname and self.kwargname == other.kwargname) def __ne__(self, other): + if not isinstance(other, Signature): + return NotImplemented return not self == other Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/assemble.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/assemble.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/assemble.py Thu Sep 9 01:00:13 2010 @@ -614,22 +614,22 @@ return (oparg % 256) + 2 * (oparg / 256) def _compute_CALL_FUNCTION(arg): - return _num_args(arg) + return -_num_args(arg) def _compute_CALL_FUNCTION_VAR(arg): - return _num_args(arg) - 1 + return -_num_args(arg) - 1 def _compute_CALL_FUNCTION_KW(arg): - return _num_args(arg) - 1 + return -_num_args(arg) - 1 def _compute_CALL_FUNCTION_VAR_KW(arg): - return _num_args(arg) - 2 + return -_num_args(arg) - 2 def _compute_CALL_LIKELY_BUILTIN(arg): return -(arg & 0xFF) + 1 def _compute_CALL_METHOD(arg): - return -arg - 1 + return -_num_args(arg) - 1 _stack_effect_computers = {} Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/ast.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/ast.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/ast.py Thu Sep 9 01:00:13 2010 @@ -230,6 +230,7 @@ visitor.visit_FunctionDef(self) def mutate_over(self, visitor): + self.args = self.args.mutate_over(visitor) if self.body: visitor._mutate_sequence(self.body) if self.decorator_list: @@ -798,6 +799,8 @@ def mutate_over(self, visitor): if self.body: visitor._mutate_sequence(self.body) + if self.handlers: + visitor._mutate_sequence(self.handlers) if self.orelse: 
visitor._mutate_sequence(self.orelse) return visitor.visit_TryExcept(self) @@ -941,6 +944,8 @@ visitor.visit_Import(self) def mutate_over(self, visitor): + if self.names: + visitor._mutate_sequence(self.names) return visitor.visit_Import(self) def sync_app_attrs(self, space): @@ -979,6 +984,8 @@ visitor.visit_ImportFrom(self) def mutate_over(self, visitor): + if self.names: + visitor._mutate_sequence(self.names) return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): @@ -1296,6 +1303,7 @@ visitor.visit_Lambda(self) def mutate_over(self, visitor): + self.args = self.args.mutate_over(visitor) self.body = self.body.mutate_over(visitor) return visitor.visit_Lambda(self) @@ -1450,6 +1458,8 @@ def mutate_over(self, visitor): self.elt = self.elt.mutate_over(visitor) + if self.generators: + visitor._mutate_sequence(self.generators) return visitor.visit_ListComp(self) def sync_app_attrs(self, space): @@ -1570,6 +1580,8 @@ def mutate_over(self, visitor): self.elt = self.elt.mutate_over(visitor) + if self.generators: + visitor._mutate_sequence(self.generators) return visitor.visit_GeneratorExp(self) def sync_app_attrs(self, space): @@ -1695,6 +1707,8 @@ self.func = self.func.mutate_over(visitor) if self.args: visitor._mutate_sequence(self.args) + if self.keywords: + visitor._mutate_sequence(self.keywords) if self.starargs: self.starargs = self.starargs.mutate_over(visitor) if self.kwargs: @@ -2426,6 +2440,13 @@ self.w_ifs = None self.initialization_state = 7 + def mutate_over(self, visitor): + self.target = self.target.mutate_over(visitor) + self.iter = self.iter.mutate_over(visitor) + if self.ifs: + visitor._mutate_sequence(self.ifs) + return visitor.visit_comprehension(self) + def walkabout(self, visitor): visitor.visit_comprehension(self) @@ -2460,6 +2481,15 @@ self.col_offset = col_offset self.initialization_state = 31 + def mutate_over(self, visitor): + if self.type: + self.type = self.type.mutate_over(visitor) + if self.name: + self.name = self.name.mutate_over(visitor) + if self.body: + visitor._mutate_sequence(self.body) + return visitor.visit_excepthandler(self) + def walkabout(self, visitor): visitor.visit_excepthandler(self) @@ -2499,6 +2529,13 @@ self.w_defaults = None self.initialization_state = 15 + def mutate_over(self, visitor): + if self.args: + visitor._mutate_sequence(self.args) + if self.defaults: + visitor._mutate_sequence(self.defaults) + return visitor.visit_arguments(self) + def walkabout(self, visitor): visitor.visit_arguments(self) @@ -2540,6 +2577,10 @@ self.value = value self.initialization_state = 3 + def mutate_over(self, visitor): + self.value = self.value.mutate_over(visitor) + return visitor.visit_keyword(self) + def walkabout(self, visitor): visitor.visit_keyword(self) @@ -2559,6 +2600,9 @@ self.asname = asname self.initialization_state = 3 + def mutate_over(self, visitor): + return visitor.visit_alias(self) + def walkabout(self, visitor): visitor.visit_alias(self) Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py Thu Sep 9 01:00:13 2010 @@ -298,9 +298,11 @@ # Load decorators first, but apply them after the function is created. 
if func.decorator_list: self.visit_sequence(func.decorator_list) - if func.args.defaults: - self.visit_sequence(func.args.defaults) - num_defaults = len(func.args.defaults) + args = func.args + assert isinstance(args, ast.arguments) + if args.defaults: + self.visit_sequence(args.defaults) + num_defaults = len(args.defaults) else: num_defaults = 0 code = self.sub_scope(FunctionCodeGenerator, func.name, func, @@ -314,9 +316,11 @@ def visit_Lambda(self, lam): self.update_position(lam.lineno) - if lam.args.defaults: - self.visit_sequence(lam.args.defaults) - default_count = len(lam.args.defaults) + args = lam.args + assert isinstance(args, ast.arguments) + if args.defaults: + self.visit_sequence(args.defaults) + default_count = len(args.defaults) else: default_count = 0 code = self.sub_scope(LambdaCodeGenerator, "", lam, lam.lineno) @@ -980,9 +984,12 @@ elif call_type == 3: op = ops.CALL_FUNCTION_VAR_KW self.emit_op_arg(op, arg) + + def _call_has_no_star_args(self, call): + return not call.starargs and not call.kwargs def _call_has_simple_args(self, call): - return not call.starargs and not call.kwargs and not call.keywords + return self._call_has_no_star_args(call) and not call.keywords def _optimize_builtin_call(self, call): if not self.space.config.objspace.opcodes.CALL_LIKELY_BUILTIN or \ @@ -1008,7 +1015,7 @@ def _optimize_method_call(self, call): if not self.space.config.objspace.opcodes.CALL_METHOD or \ - not self._call_has_simple_args(call) or \ + not self._call_has_no_star_args(call) or \ not isinstance(call.func, ast.Attribute): return False attr_lookup = call.func @@ -1020,7 +1027,12 @@ arg_count = len(call.args) else: arg_count = 0 - self.emit_op_arg(ops.CALL_METHOD, arg_count) + if call.keywords: + self.visit_sequence(call.keywords) + kwarg_count = len(call.keywords) + else: + kwarg_count = 0 + self.emit_op_arg(ops.CALL_METHOD, (kwarg_count << 8) | arg_count) return True def _listcomp_generator(self, gens, gen_index, elt): @@ -1284,9 +1296,11 @@ else: self.add_const(self.space.w_None) start = 0 - if func.args.args: - self._handle_nested_args(func.args.args) - self.argcount = len(func.args.args) + args = func.args + assert isinstance(args, ast.arguments) + if args.args: + self._handle_nested_args(args.args) + self.argcount = len(args.args) for i in range(start, len(func.body)): func.body[i].walkabout(self) @@ -1295,9 +1309,11 @@ def _compile(self, lam): assert isinstance(lam, ast.Lambda) - if lam.args.args: - self._handle_nested_args(lam.args.args) - self.argcount = len(lam.args.args) + args = lam.args + assert isinstance(args, ast.arguments) + if args.args: + self._handle_nested_args(args.args) + self.argcount = len(args.args) # Prevent a string from being the first constant and thus a docstring. 
self.add_const(self.space.w_None) lam.body.walkabout(self) Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/consts.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/consts.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/consts.py Thu Sep 9 01:00:13 2010 @@ -9,7 +9,6 @@ CO_NESTED = 0x0010 CO_GENERATOR = 0x0020 CO_NOFREE = 0x0040 -CO_CONTAINSLOOP = 0x0080 CO_CONTAINSGLOBALS = 0x0800 CO_GENERATOR_ALLOWED = 0x1000 CO_FUTURE_DIVISION = 0x2000 @@ -20,4 +19,4 @@ PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 -PyCF_AST_ONLY = 0x0400 +PyCF_ONLY_AST = 0x0400 Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/symtable.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/symtable.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/symtable.py Thu Sep 9 01:00:13 2010 @@ -353,8 +353,10 @@ def visit_FunctionDef(self, func): self.note_symbol(func.name, SYM_ASSIGNED) # Function defaults and decorators happen in the outer scope. - if func.args.defaults: - self.visit_sequence(func.args.defaults) + args = func.args + assert isinstance(args, ast.arguments) + if args.defaults: + self.visit_sequence(args.defaults) if func.decorator_list: self.visit_sequence(func.decorator_list) new_scope = FunctionScope(func.name, func.lineno, func.col_offset) @@ -422,8 +424,10 @@ self.note_symbol(name, SYM_GLOBAL) def visit_Lambda(self, lamb): - if lamb.args.defaults: - self.visit_sequence(lamb.args.defaults) + args = lamb.args + assert isinstance(args, ast.arguments) + if args.defaults: + self.visit_sequence(args.defaults) new_scope = FunctionScope("lambda", lamb.lineno, lamb.col_offset) self.push_scope(new_scope, lamb) lamb.args.walkabout(self) Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/tools/asdl_py.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/tools/asdl_py.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/tools/asdl_py.py Thu Sep 9 01:00:13 2010 @@ -100,6 +100,7 @@ self.emit("") self.make_constructor(product.fields, product) self.emit("") + self.make_mutate_over(product, name) self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (name,), 2) self.emit("") @@ -183,6 +184,26 @@ have_everything = self.data.required_masks[node] | \ self.data.optional_masks[node] self.emit("self.initialization_state = %i" % (have_everything,), 2) + + def make_mutate_over(self, cons, name): + self.emit("def mutate_over(self, visitor):", 1) + for field in cons.fields: + if (field.type.value not in asdl.builtin_types and + field.type.value not in self.data.simple_types): + if field.opt or field.seq: + level = 3 + self.emit("if self.%s:" % (field.name,), 2) + else: + level = 2 + if field.seq: + sub = (field.name,) + self.emit("visitor._mutate_sequence(self.%s)" % sub, level) + else: + sub = (field.name, field.name) + self.emit("self.%s = self.%s.mutate_over(visitor)" % sub, + level) + self.emit("return visitor.visit_%s(self)" % (name,), 2) + self.emit("") def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) @@ -199,24 +220,7 @@ self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") - self.emit("def mutate_over(self, 
visitor):", 1) - for field in cons.fields: - if field.type.value not in asdl.builtin_types and \ - field.type.value not in self.data.prod_simple: - if field.opt or field.seq: - level = 3 - self.emit("if self.%s:" % (field.name,), 2) - else: - level = 2 - if field.seq: - sub = (field.name,) - self.emit("visitor._mutate_sequence(self.%s)" % sub, level) - else: - sub = (field.name, field.name) - self.emit("self.%s = self.%s.mutate_over(visitor)" % sub, - level) - self.emit("return visitor.visit_%s(self)" % (cons.name,), 2) - self.emit("") + self.make_mutate_over(cons, cons.name) self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], cons, cons.name) Modified: pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py Thu Sep 9 01:00:13 2010 @@ -288,6 +288,7 @@ self.timer.stop("startup " + modname) def finish(self): + self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: self.call_function(w_exitfunc) @@ -305,6 +306,23 @@ for s in self.FrameClass._space_op_types: print s + def wait_for_thread_shutdown(self): + """Wait until threading._shutdown() completes, provided the threading + module was imported in the first place. The shutdown routine will + wait until all non-daemon 'threading' threads have completed.""" + if not self.config.translation.thread: + return + + w_modules = self.sys.get('modules') + w_mod = self.finditem_str(w_modules, 'threading') + if w_mod is None: + return + + try: + self.call_method(w_mod, "_shutdown") + except OperationError, e: + e.write_unraisable(self, "threading._shutdown()") + def reportbytecodecounts(self): os.write(2, "Starting bytecode report.\n") fd = os.open('bytecode.txt', os.O_CREAT|os.O_WRONLY|os.O_TRUNC, 0644) @@ -1098,17 +1116,6 @@ self.wrap('argument must be a unicode')) return self.unicode_w(w_obj) - def path_w(self, w_obj): - """ Like str_w, but if the object is unicode, encode it using - filesystemencoding - """ - filesystemencoding = self.sys.filesystemencoding - if (filesystemencoding and - self.is_true(self.isinstance(w_obj, self.w_unicode))): - w_obj = self.call_method(w_obj, "encode", - self.wrap(filesystemencoding)) - return self.str_w(w_obj) - def bool_w(self, w_obj): # Unwraps a bool, also accepting an int for compatibility. # This is here mostly just for gateway.int_unwrapping_space_method(). 
Modified: pypy/branch/fast-forward/pypy/interpreter/error.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/error.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/error.py Thu Sep 9 01:00:13 2010 @@ -344,7 +344,7 @@ else: _WINDOWS = True - def wrap_windowserror(space, e, filename=None): + def wrap_windowserror(space, e, w_filename=None): from pypy.rlib import rwin32 winerror = e.winerror @@ -353,19 +353,19 @@ except ValueError: msg = 'Windows Error %d' % winerror exc = space.w_WindowsError - if filename is not None: + if w_filename is not None: w_error = space.call_function(exc, space.wrap(winerror), - space.wrap(msg), space.wrap(filename)) + space.wrap(msg), w_filename) else: w_error = space.call_function(exc, space.wrap(winerror), space.wrap(msg)) return OperationError(exc, w_error) -def wrap_oserror(space, e, filename=None, exception_name='w_OSError'): +def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError'): assert isinstance(e, OSError) if _WINDOWS and isinstance(e, WindowsError): - return wrap_windowserror(space, e, filename) + return wrap_windowserror(space, e, w_filename) errno = e.errno try: @@ -373,10 +373,21 @@ except ValueError: msg = 'error %d' % errno exc = getattr(space, exception_name) - if filename is not None: + if w_filename is not None: w_error = space.call_function(exc, space.wrap(errno), - space.wrap(msg), space.wrap(filename)) + space.wrap(msg), w_filename) else: - w_error = space.call_function(exc, space.wrap(errno), space.wrap(msg)) + w_error = space.call_function(exc, space.wrap(errno), + space.wrap(msg)) return OperationError(exc, w_error) +wrap_oserror2._annspecialcase_ = 'specialize:arg(3)' + +def wrap_oserror(space, e, filename=None, exception_name='w_OSError'): + if filename is not None: + return wrap_oserror2(space, e, space.wrap(filename), + exception_name=exception_name) + else: + return wrap_oserror2(space, e, None, + exception_name=exception_name) wrap_oserror._annspecialcase_ = 'specialize:arg(3)' + Modified: pypy/branch/fast-forward/pypy/interpreter/gateway.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/gateway.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/gateway.py Thu Sep 9 01:00:13 2010 @@ -137,9 +137,6 @@ def visit_c_nonnegint(self, el, app_sig): self.checked_space_method(el, app_sig) - def visit_path(self, el, app_sig): - self.checked_space_method(el, app_sig) - def visit__Wrappable(self, el, app_sig): name = el.__name__ argname = self.orig_arg() @@ -241,9 +238,6 @@ def visit_bufferstr(self, typ): self.run_args.append("space.bufferstr_w(%s)" % (self.scopenext(),)) - def visit_path(self, typ): - self.run_args.append("space.path_w(%s)" % (self.scopenext(),)) - def visit_nonnegint(self, typ): self.run_args.append("space.nonnegint_w(%s)" % (self.scopenext(),)) @@ -371,9 +365,6 @@ def visit_bufferstr(self, typ): self.unwrap.append("space.bufferstr_w(%s)" % (self.nextarg(),)) - def visit_path(self, typ): - self.unwrap.append("space.path_w(%s)" % (self.nextarg(),)) - def visit_nonnegint(self, typ): self.unwrap.append("space.nonnegint_w(%s)" % (self.nextarg(),)) Modified: pypy/branch/fast-forward/pypy/interpreter/generator.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/generator.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/generator.py Thu Sep 9 01:00:13 2010 @@ 
-1,7 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped -from pypy.rlib.rarithmetic import intmask from pypy.rlib import jit from pypy.interpreter.pyopcode import LoopBlock @@ -37,13 +36,17 @@ return next yielded value or raise StopIteration.""" return self.send_ex(w_arg) - def send_ex(self, w_arg, exc=False): + def send_ex(self, w_arg, operr=None): space = self.space if self.running: raise OperationError(space.w_ValueError, space.wrap('generator already executing')) if self.frame.frame_finished_execution: - raise OperationError(space.w_StopIteration, space.w_None) + # xxx a bit ad-hoc, but we don't want to go inside + # execute_generator_frame() if the frame is actually finished + if operr is None: + operr = OperationError(space.w_StopIteration, space.w_None) + raise operr # XXX it's not clear that last_instr should be promoted at all # but as long as it is necessary for call_assembler, let's do it early last_instr = jit.hint(self.frame.last_instr, promote=True) @@ -57,7 +60,7 @@ self.running = True try: try: - w_result = self.frame.execute_generator_frame(w_arg, exc) + w_result = self.frame.execute_generator_frame(w_arg, operr) except OperationError: # errors finish a frame self.frame.frame_finished_execution = True @@ -89,12 +92,7 @@ operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) - - ec = space.getexecutioncontext() - next_instr = self.frame.handle_operation_error(ec, operr) - self.frame.last_instr = intmask(next_instr - 1) - - return self.send_ex(space.w_None, True) + return self.send_ex(space.w_None, operr) def descr_next(self): """next() -> the next value, or raise StopIteration""" Modified: pypy/branch/fast-forward/pypy/interpreter/pycode.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pycode.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pycode.py Thu Sep 9 01:00:13 2010 @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. 
""" -import dis, imp, struct, types, new +import dis, imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.argument import Signature @@ -13,7 +13,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.astcompiler.consts import (CO_OPTIMIZED, CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_CONTAINSLOOP, CO_CONTAINSGLOBALS) + CO_GENERATOR, CO_CONTAINSGLOBALS) from pypy.rlib.rarithmetic import intmask from pypy.rlib.debug import make_sure_not_resized, make_sure_not_modified from pypy.rlib import jit @@ -118,7 +118,8 @@ self._compute_flatcall() def _freeze_(self): - if self.magic == cpython_magic: + if (self.magic == cpython_magic and + '__pypy__' not in sys.builtin_module_names): raise Exception("CPython host codes should not be rendered") return False @@ -133,9 +134,7 @@ while opcode == opcodedesc.EXTENDED_ARG.index: opcode = ord(co_code[next_instr]) next_instr += 3 - if opcode == opcodedesc.JUMP_ABSOLUTE.index: - self.co_flags |= CO_CONTAINSLOOP - elif opcode == opcodedesc.LOAD_GLOBAL.index: + if opcode == opcodedesc.LOAD_GLOBAL.index: self.co_flags |= CO_CONTAINSGLOBALS elif opcode == opcodedesc.LOAD_NAME.index: self.co_flags |= CO_CONTAINSGLOBALS Modified: pypy/branch/fast-forward/pypy/interpreter/pyframe.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pyframe.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pyframe.py Thu Sep 9 01:00:13 2010 @@ -10,6 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized +from pypy.rlib.rarithmetic import intmask from pypy.rlib import jit, rstack from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -126,8 +127,12 @@ else: return self.execute_frame() - def execute_generator_frame(self, w_inputvalue, ex=False): - if self.last_instr != -1 and not ex: + def execute_generator_frame(self, w_inputvalue, operr=None): + if operr is not None: + ec = self.space.getexecutioncontext() + next_instr = self.handle_operation_error(ec, operr) + self.last_instr = intmask(next_instr - 1) + elif self.last_instr != -1: self.pushvalue(w_inputvalue) return self.execute_frame() Modified: pypy/branch/fast-forward/pypy/interpreter/pyopcode.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pyopcode.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pyopcode.py Thu Sep 9 01:00:13 2010 @@ -211,10 +211,6 @@ next_instr = block.handle(self, unroller) return next_instr # now inside a 'finally' block - if opcode == self.opcodedesc.YIELD_VALUE.index: - #self.last_instr = intmask(next_instr - 1) XXX clean up! 
- raise Yield - if opcode == self.opcodedesc.END_FINALLY.index: unroller = self.end_finally() if isinstance(unroller, SuspendedUnroller): @@ -239,7 +235,7 @@ if not opdesc.is_enabled(space): continue if opdesc.methodname in ( - 'EXTENDED_ARG', 'RETURN_VALUE', 'YIELD_VALUE', + 'EXTENDED_ARG', 'RETURN_VALUE', 'END_FINALLY', 'JUMP_ABSOLUTE'): continue # opcodes implemented above @@ -810,6 +806,9 @@ self.space.str_w(w_name)) self.pushvalue(w_obj) + def YIELD_VALUE(self, oparg, next_instr): + raise Yield + def jump_absolute(self, jumpto, next_instr, ec): return jumpto @@ -1031,23 +1030,9 @@ self.dropvalues(nargs) self.pushvalue(w_result) - def LOOKUP_METHOD(self, nameindex, next_instr): - # overridden by faster version in the standard object space. - space = self.space - w_obj = self.popvalue() - w_name = self.getname_w(nameindex) - w_value = space.getattr(w_obj, w_name) - self.pushvalue(w_value) - - def CALL_METHOD(self, nargs, next_instr): - # overridden by faster version in the standard object space. - # 'nargs' is the argument count excluding the implicit 'self' - w_callable = self.peekvalue(nargs) - try: - w_result = self.space.call_valuestack(w_callable, nargs, self) - finally: - self.dropvalues(nargs + 1) - self.pushvalue(w_result) + # overridden by faster version in the standard object space. + LOOKUP_METHOD = LOAD_ATTR + CALL_METHOD = CALL_FUNCTION def MISSING_OPCODE(self, oparg, next_instr): ofs = self.last_instr Modified: pypy/branch/fast-forward/pypy/interpreter/test/test_code.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/test/test_code.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/test/test_code.py Thu Sep 9 01:00:13 2010 @@ -184,8 +184,6 @@ # CO_NESTED assert f(4).func_code.co_flags & 0x10 assert f.func_code.co_flags & 0x10 == 0 - # check for CO_CONTAINSLOOP - assert not f.func_code.co_flags & 0x0080 # check for CO_CONTAINSGLOBALS assert not f.func_code.co_flags & 0x0800 @@ -198,9 +196,6 @@ return [l for l in [1, 2, 3, 4]] """ - # check for CO_CONTAINSLOOP - assert f.func_code.co_flags & 0x0080 - assert g.func_code.co_flags & 0x0080 # check for CO_CONTAINSGLOBALS assert f.func_code.co_flags & 0x0800 assert not g.func_code.co_flags & 0x0800 Modified: pypy/branch/fast-forward/pypy/interpreter/test/test_compiler.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/test/test_compiler.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/test/test_compiler.py Thu Sep 9 01:00:13 2010 @@ -4,6 +4,7 @@ from pypy.interpreter.pycode import PyCode from pypy.interpreter.error import OperationError from pypy.interpreter.argument import Arguments +from pypy.conftest import gettestobjspace class BaseTestCompiler: def setup_method(self, method): @@ -852,6 +853,47 @@ sys.stdout = save_stdout output = s.getvalue() assert "STOP_CODE" not in output + + def test_optimize_list_comp(self): + source = """def _f(a): + return [x for x in a if None] + """ + exec source + code = _f.func_code + + import StringIO, sys, dis + s = StringIO.StringIO() + out = sys.stdout + sys.stdout = s + try: + dis.dis(code) + finally: + sys.stdout = out + output = s.getvalue() + assert "LOAD_GLOBAL" not in output + +class AppTestCallMethod(object): + def setup_class(cls): + cls.space = gettestobjspace(**{'objspace.opcodes.CALL_METHOD': True}) + + def test_call_method_kwargs(self): + source = """def _f(a): + return a.f(a=a) + """ + exec source + code = 
_f.func_code + + import StringIO, sys, dis + s = StringIO.StringIO() + out = sys.stdout + sys.stdout = s + try: + dis.dis(code) + finally: + sys.stdout = out + output = s.getvalue() + assert "CALL_METHOD" in output + class AppTestExceptions: def test_indentation_error(self): Modified: pypy/branch/fast-forward/pypy/interpreter/test/test_gateway.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/test/test_gateway.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/test/test_gateway.py Thu Sep 9 01:00:13 2010 @@ -454,16 +454,6 @@ assert len(l) == 1 assert space.eq_w(l[0], w("foo")) - def test_interp2app_unwrap_spec_path(self, monkeypatch): - space = self.space - def g(space, p): - return p - - app_g = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'path']) - w_app_g = space.wrap(app_g) - monkeypatch.setattr(space.sys, "filesystemencoding", "utf-8") - w_res = space.call_function(w_app_g, space.wrap(u"?")) - def test_interp2app_classmethod(self): space = self.space w = space.wrap Modified: pypy/branch/fast-forward/pypy/interpreter/test/test_generator.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/test/test_generator.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/test/test_generator.py Thu Sep 9 01:00:13 2010 @@ -126,6 +126,25 @@ raises(ValueError, g.throw, ValueError) assert g.gi_frame is None + def test_throw_bug(self): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + + def test_throw_on_finished_generator(self): + def f(): + yield 1 + g = f() + res = g.next() + assert res == 1 + raises(StopIteration, g.next) + raises(NameError, g.throw, NameError) + def test_close(self): def f(): yield 1 Modified: pypy/branch/fast-forward/pypy/interpreter/test/test_zpy.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/test/test_zpy.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/test/test_zpy.py Thu Sep 9 01:00:13 2010 @@ -3,27 +3,28 @@ import py import sys import pypy +import subprocess pypypath = py.path.local(pypy.__file__).dirpath("bin", "py.py") -def cmdexec(s): - if sys.platform == 'win32': - s = '"%s"' % s # double double quotes - return py.process.cmdexec(s) +def run(*args): + argslist = map(str, args) + popen = subprocess.Popen(argslist, stdout=subprocess.PIPE) + stdout, stderr = popen.communicate() + return stdout def test_executable(): """Ensures sys.executable points to the py.py script""" # TODO : watch out for spaces/special chars in pypypath - output = cmdexec( '''"%s" "%s" -c "import sys;print sys.executable" ''' % - (sys.executable, pypypath) ) + output = run(sys.executable, pypypath, + "-c", "import sys;print sys.executable") assert output.splitlines()[-1] == pypypath def test_special_names(): """Test the __name__ and __file__ special global names""" cmd = "print __name__; print '__file__' in globals()" - output = cmdexec( '''"%s" "%s" -c "%s" ''' % - (sys.executable, pypypath, cmd) ) + output = run(sys.executable, pypypath, '-c', cmd) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == 'False' @@ -32,26 +33,25 @@ tmpfile.write("print __name__; print __file__\n") tmpfile.close() - output = cmdexec( '''"%s" "%s" "%s" ''' % - (sys.executable, pypypath, tmpfilepath) ) + output = 
run(sys.executable, pypypath, tmpfilepath) assert output.splitlines()[-2] == '__main__' assert output.splitlines()[-1] == str(tmpfilepath) def test_argv_command(): """Some tests on argv""" # test 1 : no arguments - output = cmdexec( '''"%s" "%s" -c "import sys;print sys.argv" ''' % - (sys.executable, pypypath) ) + output = run(sys.executable, pypypath, + "-c", "import sys;print sys.argv") assert output.splitlines()[-1] == str(['-c']) # test 2 : some arguments after - output = cmdexec( '''"%s" "%s" -c "import sys;print sys.argv" hello''' % - (sys.executable, pypypath) ) + output = run(sys.executable, pypypath, + "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) # test 3 : additionnal pypy parameters - output = cmdexec( '''"%s" "%s" -O -c "import sys;print sys.argv" hello''' % - (sys.executable, pypypath) ) + output = run(sys.executable, pypypath, + "-O", "-c", "import sys;print sys.argv", "hello") assert output.splitlines()[-1] == str(['-c','hello']) SCRIPT_1 = """ @@ -65,18 +65,15 @@ tmpfile.close() # test 1 : no arguments - output = cmdexec( '''"%s" "%s" "%s" ''' % - (sys.executable, pypypath, tmpfilepath) ) + output = run(sys.executable, pypypath, tmpfilepath) assert output.splitlines()[-1] == str([tmpfilepath]) # test 2 : some arguments after - output = cmdexec( '''"%s" "%s" "%s" hello''' % - (sys.executable, pypypath, tmpfilepath) ) + output = run(sys.executable, pypypath, tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) # test 3 : additionnal pypy parameters - output = cmdexec( '''"%s" "%s" -O "%s" hello''' % - (sys.executable, pypypath, tmpfilepath) ) + output = run(sys.executable, pypypath, "-O", tmpfilepath, "hello") assert output.splitlines()[-1] == str([tmpfilepath,'hello']) @@ -98,11 +95,7 @@ tmpfile.write(TB_NORMALIZATION_CHK) tmpfile.close() - e = None - try: - output = cmdexec( '''"%s" "%s" "%s" ''' % - (sys.executable, pypypath, tmpfilepath) ) - except py.process.cmdexec.Error, e: - pass - assert e," expected failure" - assert e.err.splitlines()[-1] == 'KeyError: ' + popen = subprocess.Popen([sys.executable, str(pypypath), tmpfilepath], + stderr=subprocess.PIPE) + _, stderr = popen.communicate() + assert stderr.endswith('KeyError: \n') Modified: pypy/branch/fast-forward/pypy/jit/backend/detect_cpu.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/detect_cpu.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/detect_cpu.py Thu Sep 9 01:00:13 2010 @@ -56,6 +56,8 @@ return "pypy.jit.backend.x86.runner", "CPU" elif backend_name == 'x86-without-sse2': return "pypy.jit.backend.x86.runner", "CPU386_NO_SSE2" + elif backend_name == 'x86_64': + return "pypy.jit.backend.x86.runner", "CPU_X86_64" elif backend_name == 'cli': return "pypy.jit.backend.cli.runner", "CliCPU" elif backend_name == 'llvm': Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py Thu Sep 9 01:00:13 2010 @@ -123,6 +123,9 @@ 'setarrayitem_gc' : (('ref', 'int', 'intorptr'), None), 'getarrayitem_gc' : (('ref', 'int'), 'intorptr'), 'getarrayitem_gc_pure' : (('ref', 'int'), 'intorptr'), + 'setarrayitem_raw' : (('ref', 'int', 'intorptr'), None), + 'getarrayitem_raw' : (('ref', 'int'), 'intorptr'), + 'getarrayitem_raw_pure' : 
(('ref', 'int'), 'intorptr'), 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), 'call_assembler' : (('ref', 'varargs'), 'intorptr'), @@ -689,6 +692,18 @@ op_getarrayitem_gc_pure = op_getarrayitem_gc + def op_getarrayitem_raw(self, arraydescr, array, index): + if arraydescr.typeinfo == REF: + raise NotImplementedError("getarrayitem_raw -> gcref") + elif arraydescr.typeinfo == INT: + return do_getarrayitem_raw_int(array, index) + elif arraydescr.typeinfo == FLOAT: + return do_getarrayitem_raw_float(array, index) + else: + raise NotImplementedError + + op_getarrayitem_raw_pure = op_getarrayitem_raw + def op_getfield_gc(self, fielddescr, struct): if fielddescr.typeinfo == REF: return do_getfield_gc_ptr(struct, fielddescr.ofs) @@ -734,6 +749,16 @@ else: raise NotImplementedError + def op_setarrayitem_raw(self, arraydescr, array, index, newvalue): + if arraydescr.typeinfo == REF: + raise NotImplementedError("setarrayitem_raw <- gcref") + elif arraydescr.typeinfo == INT: + do_setarrayitem_raw_int(array, index, newvalue) + elif arraydescr.typeinfo == FLOAT: + do_setarrayitem_raw_float(array, index, newvalue) + else: + raise NotImplementedError + def op_setfield_gc(self, fielddescr, struct, newvalue): if fielddescr.typeinfo == REF: do_setfield_gc_ptr(struct, fielddescr.ofs, newvalue) Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/descr.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/descr.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/descr.py Thu Sep 9 01:00:13 2010 @@ -67,9 +67,11 @@ class BaseFieldDescr(AbstractDescr): offset = 0 # help translation + name = '' _clsname = '' - def __init__(self, offset): + def __init__(self, name, offset): + self.name = name self.offset = offset def sort_key(self): @@ -88,7 +90,7 @@ return self._is_float_field def repr_of_descr(self): - return '<%s %s>' % (self._clsname, self.offset) + return '<%s %s %s>' % (self._clsname, self.name, self.offset) class NonGcPtrFieldDescr(BaseFieldDescr): @@ -113,7 +115,8 @@ offset, _ = symbolic.get_field_token(STRUCT, fieldname, gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(offset) + name = '%s.%s' % (STRUCT._name, fieldname) + fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr @@ -195,9 +198,13 @@ try: return cache[ARRAY] except KeyError: + # we only support Arrays that are either GcArrays, or raw no-length + # non-gc Arrays. 
if ARRAY._hints.get('nolength', False): + assert not isinstance(ARRAY, lltype.GcArray) arraydescr = getArrayNoLengthDescrClass(ARRAY)() else: + assert isinstance(ARRAY, lltype.GcArray) arraydescr = getArrayDescrClass(ARRAY)() # verify basic assumption that all arrays' basesize and ofslength # are equal Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py Thu Sep 9 01:00:13 2010 @@ -251,13 +251,25 @@ if oldgcmap: lltype.free(oldgcmap, flavor='raw') - def get_basic_shape(self): - return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) - chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) - chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) - chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) - chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) - chr(0)] + def get_basic_shape(self, is_64_bit=False): + # XXX: Should this code even really know about stack frame layout of + # the JIT? + if is_64_bit: + return [chr(self.LOC_EBP_PLUS | 8), + chr(self.LOC_EBP_MINUS | 8), + chr(self.LOC_EBP_MINUS | 16), + chr(self.LOC_EBP_MINUS | 24), + chr(self.LOC_EBP_MINUS | 32), + chr(self.LOC_EBP_MINUS | 40), + chr(self.LOC_EBP_PLUS | 0), + chr(0)] + else: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) + chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) + chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) + chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) + chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) + chr(0)] def _encode_num(self, shape, number): assert number >= 0 @@ -276,17 +288,9 @@ num = self.LOC_EBP_MINUS | (-offset) self._encode_num(shape, num) - def add_ebx(self, shape): - shape.append(chr(self.LOC_REG | 4)) - - def add_esi(self, shape): - shape.append(chr(self.LOC_REG | 8)) - - def add_edi(self, shape): - shape.append(chr(self.LOC_REG | 12)) - - def add_ebp(self, shape): - shape.append(chr(self.LOC_REG | 16)) + def add_callee_save_reg(self, shape, reg_index): + assert reg_index > 0 + shape.append(chr(self.LOC_REG | (reg_index << 2))) def compress_callshape(self, shape): # Similar to compress_callshape() in trackgcroot.py. 
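The add_callee_save_reg() change above folds the four hard-coded helpers (add_ebx, add_esi, add_edi, add_ebp) into a single method that derives the shape byte from a register index, which also scales to the larger x86-64 register set. A standalone sketch of the encoding, with LOC_REG taken to be 0 as the updated test_gc.py assertions below imply:

    def add_callee_save_reg(shape, reg_index, LOC_REG=0):
        # same encoding as the patched GcRootMap method: tag | (index << 2)
        assert reg_index > 0
        shape.append(chr(LOC_REG | (reg_index << 2)))

    shape = []
    for idx in (1, 2, 3, 4):           # ebx, esi, edi, ebp on 32-bit
        add_callee_save_reg(shape, idx)
    assert shape == map(chr, [4, 8, 12, 16])   # the bytes the old helpers emitted
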
@@ -328,7 +332,7 @@ DEBUG = False # forced to True by x86/test/test_zrpy_gc.py def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import _check_typeid + from pypy.rpython.memory.gctypelayout import check_typeid from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rpython.memory.gctransform import framework GcLLDescription.__init__(self, gcdescr, translator, rtyper) @@ -351,7 +355,7 @@ gcrootmap = cls() self.gcrootmap = gcrootmap self.gcrefs = GcRefList() - self.single_gcref_descr = GcPtrFieldDescr(0) + self.single_gcref_descr = GcPtrFieldDescr('', 0) # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer @@ -375,7 +379,7 @@ def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) has_finalizer = bool(tid & (1<' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) assert descr3.repr_of_descr() == '' Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 9 01:00:13 2010 @@ -73,16 +73,16 @@ gcrootmap.add_ebp_offset(shape, num1) gcrootmap.add_ebp_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) - gcrootmap.add_ebx(shape) + gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4]) - gcrootmap.add_esi(shape) + gcrootmap.add_callee_save_reg(shape, 2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8]) - gcrootmap.add_edi(shape) + gcrootmap.add_callee_save_reg(shape, 3) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12]) - gcrootmap.add_ebp(shape) + gcrootmap.add_callee_save_reg(shape, 4) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12, 16]) # Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_regalloc.py Thu Sep 9 01:00:13 2010 @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat +from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan @@ -26,9 +26,20 @@ def convert_to_imm(self, v): return v +class FakeFramePos(object): + def __init__(self, pos, box_type): + self.pos = pos + self.box_type = box_type + + def frame_size(self): + if self.box_type == FLOAT: + return 2 + else: + return 1 + class TFrameManager(FrameManager): - def frame_pos(self, i, size): - return i + def frame_pos(self, i, box_type): + return FakeFramePos(i, box_type) class MockAsm(object): def __init__(self): @@ -146,8 +157,8 @@ rm.next_instruction() # allocate a stack position b0, b1, b2, b3, b4 = boxes - sp = fm.loc(b0, 1) - assert sp == 0 + sp = fm.loc(b0) + assert 
sp.pos == 0 loc = rm.make_sure_var_in_reg(b0) assert isinstance(loc, FakeReg) rm._check_invariants() @@ -207,13 +218,13 @@ asm = MockAsm() rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) rm.next_instruction() - fm.loc(b0, 1) + fm.loc(b0) rm.force_result_in_reg(b1, b0) rm._check_invariants() loc = rm.loc(b1) assert isinstance(loc, FakeReg) loc = rm.loc(b0) - assert isinstance(loc, int) + assert isinstance(loc, FakeFramePos) assert len(asm.moves) == 1 def test_return_constant(self): @@ -304,7 +315,7 @@ def test_different_frame_width(self): class XRegisterManager(RegisterManager): - reg_width = 2 + pass fm = TFrameManager() b0 = BoxInt() Modified: pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py Thu Sep 9 01:00:13 2010 @@ -461,6 +461,25 @@ [funcbox] + args, 'float', descr=calldescr) assert abs(res.value - 4.6) < 0.0001 + + def test_call_many_arguments(self): + # Test calling a function with a large number of arguments (more than + # 6, which will force passing some arguments on the stack on 64-bit) + + def func(*args): + assert len(args) == 16 + # Try to sum up args in a way that would probably detect a + # transposed argument + return sum(arg * (2**i) for i, arg in enumerate(args)) + + FUNC = self.FuncType([lltype.Signed]*16, lltype.Signed) + FPTR = self.Ptr(FUNC) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + func_ptr = llhelper(FPTR, func) + args = range(16) + funcbox = self.get_funcbox(self.cpu, func_ptr) + res = self.execute_operation(rop.CALL, [funcbox] + map(BoxInt, args), 'int', descr=calldescr) + assert res.value == func(*args) def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. 
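The checksum used by test_call_many_arguments above weights argument i by 2**i, so swapping two distinct values a and b at positions i and j changes the sum by (a - b) * (2**i - 2**j), which is never zero; that is why a transposed argument shows up as a wrong result. A quick self-contained check of that property (illustration only, not part of the test):

    def checksum(args):
        return sum(arg * (2 ** i) for i, arg in enumerate(args))

    good = range(16)
    bad = range(16)
    bad[6], bad[7] = bad[7], bad[6]    # simulate two arguments being swapped
    assert checksum(good) != checksum(bad)
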
@@ -638,6 +657,21 @@ assert r.value == 1 def test_array_basic(self): + a_box, A = self.alloc_array_of(rffi.SHORT, 342) + arraydescr = self.cpu.arraydescrof(A) + assert not arraydescr.is_array_of_pointers() + # + r = self.execute_operation(rop.ARRAYLEN_GC, [a_box], + 'int', descr=arraydescr) + assert r.value == 342 + r = self.execute_operation(rop.SETARRAYITEM_GC, [a_box, BoxInt(310), + BoxInt(744)], + 'void', descr=arraydescr) + assert r is None + r = self.execute_operation(rop.GETARRAYITEM_GC, [a_box, BoxInt(310)], + 'int', descr=arraydescr) + assert r.value == 744 + a_box, A = self.alloc_array_of(lltype.Signed, 342) arraydescr = self.cpu.arraydescrof(A) assert not arraydescr.is_array_of_pointers() @@ -751,6 +785,20 @@ 'float', descr=arraydescr) assert r.value == 4.5 + # For platforms where sizeof(INT) != sizeof(Signed) (ie, x86-64) + a_box, A = self.alloc_array_of(rffi.INT, 342) + arraydescr = self.cpu.arraydescrof(A) + assert not arraydescr.is_array_of_pointers() + r = self.execute_operation(rop.ARRAYLEN_GC, [a_box], + 'int', descr=arraydescr) + assert r.value == 342 + r = self.execute_operation(rop.SETARRAYITEM_GC, [a_box, BoxInt(310), + BoxInt(7441)], + 'void', descr=arraydescr) + assert r is None + r = self.execute_operation(rop.GETARRAYITEM_GC, [a_box, BoxInt(310)], + 'int', descr=arraydescr) + assert r.value == 7441 def test_string_basic(self): s_box = self.alloc_string("hello\xfe") @@ -978,6 +1026,8 @@ else: assert 0 operations.append(ResOperation(opnum, boxargs, boxres)) + # Unique-ify inputargs + inputargs = list(set(inputargs)) faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) @@ -1050,9 +1100,11 @@ descr=BasicFailDescr(5))] operations[1].fail_args = [] looptoken = LoopToken() - self.cpu.compile_loop(list(testcase), operations, + # Use "set" to unique-ify inputargs + unique_testcase_list = list(set(testcase)) + self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(testcase): + for i, box in enumerate(unique_testcase_list): self.cpu.set_future_value_float(i, box.value) fail = self.cpu.execute_token(looptoken) if fail.identifier != 5 - (expected_id^expected): @@ -1695,7 +1747,7 @@ def test_assembler_call(self): called = [] def assembler_helper(failindex, virtualizable): - assert self.cpu.get_latest_value_int(0) == 10 + assert self.cpu.get_latest_value_int(0) == 97 called.append(failindex) return 4 + 9 @@ -1708,33 +1760,41 @@ _assembler_helper_ptr) ops = ''' - [i0, i1] - i2 = int_add(i0, i1) - finish(i2)''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_add(i0, i1) + i11 = int_add(i10, i2) + i12 = int_add(i11, i3) + i13 = int_add(i12, i4) + i14 = int_add(i13, i5) + i15 = int_add(i14, i6) + i16 = int_add(i15, i7) + i17 = int_add(i16, i8) + i18 = int_add(i17, i9) + finish(i18)''' loop = parse(ops) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - ARGS = [lltype.Signed, lltype.Signed] + ARGS = [lltype.Signed] * 10 RES = lltype.Signed self.cpu.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) - self.cpu.set_future_value_int(0, 1) - self.cpu.set_future_value_int(1, 2) + for i in range(10): + self.cpu.set_future_value_int(i, i+1) res = self.cpu.execute_token(looptoken) - assert self.cpu.get_latest_value_int(0) == 3 + assert self.cpu.get_latest_value_int(0) == 55 ops = ''' - [i4, i5] - i6 = int_add(i4, 1) - i3 = call_assembler(i6, i5, descr=looptoken) + 
[i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_add(i0, 42) + i11 = call_assembler(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, descr=looptoken) guard_not_forced()[] - finish(i3) + finish(i11) ''' loop = parse(ops, namespace=locals()) othertoken = LoopToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 5) + for i in range(10): + self.cpu.set_future_value_int(i, i+1) res = self.cpu.execute_token(othertoken) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -1786,6 +1846,31 @@ assert self.cpu.get_latest_value_float(0) == 13.5 assert called + def test_raw_malloced_getarrayitem(self): + ARRAY = rffi.CArray(lltype.Signed) + descr = self.cpu.arraydescrof(ARRAY) + a = lltype.malloc(ARRAY, 10, flavor='raw') + a[7] = -4242 + addr = llmemory.cast_ptr_to_adr(a) + abox = BoxInt(heaptracker.adr2int(addr)) + r1 = self.execute_operation(rop.GETARRAYITEM_RAW, [abox, BoxInt(7)], + 'int', descr=descr) + assert r1.getint() == -4242 + lltype.free(a, flavor='raw') + + def test_raw_malloced_setarrayitem(self): + ARRAY = rffi.CArray(lltype.Signed) + descr = self.cpu.arraydescrof(ARRAY) + a = lltype.malloc(ARRAY, 10, flavor='raw') + addr = llmemory.cast_ptr_to_adr(a) + abox = BoxInt(heaptracker.adr2int(addr)) + self.execute_operation(rop.SETARRAYITEM_RAW, [abox, BoxInt(5), + BoxInt(12345)], + 'void', descr=descr) + assert a[5] == 12345 + lltype.free(a, flavor='raw') + + class OOtypeBackendTest(BaseBackendTest): type_system = 'ootype' Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py Thu Sep 9 01:00:13 2010 @@ -1,4 +1,4 @@ -import sys +import sys, os from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT,\ @@ -7,23 +7,35 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.tool.uid import fixid -from pypy.jit.backend.x86.regalloc import RegAlloc, WORD,\ - X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs, FRAME_FIXED_SIZE,\ - FORCE_INDEX_OFS +from pypy.jit.backend.x86.regalloc import RegAlloc, \ + X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs + +from pypy.jit.backend.x86.arch import FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64 + +from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, + esp, ebp, esi, edi, + xmm0, xmm1, xmm2, xmm3, + xmm4, xmm5, xmm6, xmm7, + r8, r9, r10, r11, + r12, r13, r14, r15, + X86_64_SCRATCH_REG, + X86_64_XMM_SCRATCH_REG, + RegLoc, StackLoc, ConstFloatLoc, + ImmedLoc, AddressLoc, imm) + from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.jit.backend.x86 import codebuf -from pypy.jit.backend.x86.ri386 import * -from pypy.jit.metainterp.resoperation import rop +from pypy.jit.backend.x86 import rx86, regloc, codebuf +from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.x86.support import values_array from pypy.rlib.debug import debug_print from pypy.rlib import rgc - -# our calling convention - we pass first 6 args in registers -# and the rest stays on the stack +from pypy.jit.backend.x86.jump import remap_frame_layout +from pypy.rlib.streamio import 
open_file_as_stream +from pypy.jit.metainterp.history import ConstInt, BoxInt # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry -CALL_ALIGN = 4 +CALL_ALIGN = 16 // WORD def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) @@ -31,16 +43,36 @@ class MachineCodeBlockWrapper(object): MC_DEFAULT_SIZE = 1024*1024 - def __init__(self, bigsize, profile_agent=None): + def __init__(self, assembler, bigsize, profile_agent=None): + self.assembler = assembler self.old_mcs = [] # keepalive self.bigsize = bigsize self._mc = self._instantiate_mc() self.function_name = None self.profile_agent = profile_agent + self.reset_reserved_bytes() def _instantiate_mc(self): # hook for testing return codebuf.MachineCodeBlock(self.bigsize) + def ensure_bytes_available(self, num_bytes): + if self.bytes_free() <= (self._reserved_bytes + num_bytes): + self.make_new_mc() + + def reserve_bytes(self, num_bytes): + self.ensure_bytes_available(num_bytes) + self._reserved_bytes += num_bytes + + def reset_reserved_bytes(self): + # XXX er.... pretty random number, just to be sure + # not to write half-instruction + self._reserved_bytes = 64 + + def get_relative_pos(self): + return self._mc.get_relative_pos() + + def overwrite(self, pos, listofchars): + return self._mc.overwrite(pos, listofchars) def bytes_free(self): return self._mc._size - self._mc.get_relative_pos() @@ -62,12 +94,25 @@ def make_new_mc(self): new_mc = self._instantiate_mc() debug_print('[new machine code block at', new_mc.tell(), ']') - self._mc.JMP(rel32(new_mc.tell())) + + if IS_X86_64: + # The scratch register is sometimes used as a temporary + # register, but the JMP below might clobber it. Rather than risk + # subtle bugs, we preserve the scratch register across the jump. + self._mc.PUSH_r(X86_64_SCRATCH_REG.value) + + self._mc.JMP(imm(new_mc.tell())) + + if IS_X86_64: + # Restore scratch reg + new_mc.POP_r(X86_64_SCRATCH_REG.value) if self.function_name is not None: self.end_function(done=False) self.start_pos = new_mc.get_relative_pos() + self.assembler.write_pending_failure_recoveries() + self._mc.done() self.old_mcs.append(self._mc) self._mc = new_mc @@ -81,24 +126,39 @@ def _new_method(name): def method(self, *args): - # XXX er.... 
pretty random number, just to be sure - # not to write half-instruction - if self.bytes_free() < 64: + if self.bytes_free() < self._reserved_bytes: self.make_new_mc() getattr(self._mc, name)(*args) method.func_name = name return method +for _name in rx86.all_instructions + regloc.all_extra_instructions: + setattr(MachineCodeBlockWrapper, _name, _new_method(_name)) + for name in dir(codebuf.MachineCodeBlock): if name.upper() == name or name == "writechr": setattr(MachineCodeBlockWrapper, name, _new_method(name)) +class GuardToken(object): + def __init__(self, faildescr, failargs, fail_locs, exc, desc_bytes): + self.faildescr = faildescr + self.failargs = failargs + self.fail_locs = fail_locs + self.exc = exc + self.desc_bytes = desc_bytes + + def recovery_stub_size(self): + # XXX: 32 is pulled out of the air + return 32 + len(self.desc_bytes) + +DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) + class Assembler386(object): mc = None - mc2 = None mc_size = MachineCodeBlockWrapper.MC_DEFAULT_SIZE _float_constants = None _regalloc = None + _output_loop_log = None def __init__(self, cpu, translate_support_code=False, failargs_limit=1000): @@ -113,18 +173,26 @@ self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(lltype.Float, failargs_limit) self.fail_ebp = 0 - self.loc_float_const_neg = None - self.loc_float_const_abs = None + self.loop_run_counters = [] + # if we have 10000 loops, we have some other problems I guess + self.float_const_neg_addr = 0 + self.float_const_abs_addr = 0 self.malloc_fixedsize_slowpath1 = 0 self.malloc_fixedsize_slowpath2 = 0 + self.pending_guard_tokens = None self.setup_failure_recovery() + self._debug = False + self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') def leave_jitted_hook(self): ptrs = self.fail_boxes_ptr.ar llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(ptrs)) - def make_sure_mc_exists(self): + def set_debug(self, v): + self._debug = v + + def setup(self): if self.mc is None: # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr @@ -143,11 +211,7 @@ ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, ll_new_unicode) - # done - # we generate the loop body in 'mc' - # 'mc2' is for guard recovery code - self.mc = MachineCodeBlockWrapper(self.mc_size, self.cpu.profile_agent) - self.mc2 = MachineCodeBlockWrapper(self.mc_size) + self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent) self._build_failure_recovery(False) self._build_failure_recovery(True) if self.cpu.supports_floats: @@ -157,46 +221,73 @@ self._build_float_constants() if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): self._build_malloc_fixedsize_slowpath() + s = os.environ.get('PYPYLOG') + if s: + if s.find(':') != -1: + s = s.split(':')[-1] + self.set_debug(True) + self._output_loop_log = s + ".count" + # Intialize here instead of __init__ to prevent + # pending_guard_tokens from being considered a prebuilt object, + # which sometimes causes memory leaks since the prebuilt list is + # still considered a GC root after we re-assign + # pending_guard_tokens in write_pending_failure_recoveries + self.pending_guard_tokens = [] + + def finish_once(self): + if self._debug: + output_log = self._output_loop_log + assert output_log is not None + f = open_file_as_stream(output_log, "w") + for i in range(len(self.loop_run_counters)): + name, struct = self.loop_run_counters[i] + 
f.write(str(struct.i) + " " * (8 - len(str(struct.i))) + name + "\n") + f.close() def _build_float_constants(self): - # 11 words: 8 words for the data, and up to 3 words for alignment - addr = lltype.malloc(rffi.CArray(lltype.Signed), 11, flavor='raw') + # 44 bytes: 32 bytes for the data, and up to 12 bytes for alignment + addr = lltype.malloc(rffi.CArray(lltype.Char), 44, flavor='raw') if not we_are_translated(): self._keepalive_malloced_float_consts = addr float_constants = rffi.cast(lltype.Signed, addr) float_constants = (float_constants + 15) & ~15 # align to 16 bytes - addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), float_constants) - addr[0] = 0 # \ - addr[1] = -2147483648 # / for neg - addr[2] = 0 # - addr[3] = 0 # - addr[4] = -1 # \ - addr[5] = 2147483647 # / for abs - addr[6] = 0 # - addr[7] = 0 # - self.loc_float_const_neg = heap64(float_constants) - self.loc_float_const_abs = heap64(float_constants + 16) + addr = rffi.cast(rffi.CArrayPtr(lltype.Char), float_constants) + qword_padding = '\x00\x00\x00\x00\x00\x00\x00\x00' + # 0x8000000000000000 + neg_const = '\x00\x00\x00\x00\x00\x00\x00\x80' + # 0x7FFFFFFFFFFFFFFF + abs_const = '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F' + data = neg_const + qword_padding + abs_const + qword_padding + for i in range(len(data)): + addr[i] = data[i] + self.float_const_neg_addr = float_constants + self.float_const_abs_addr = float_constants + 16 def _build_malloc_fixedsize_slowpath(self): - mc = self.mc2._mc # ---------- first helper for the slow path of malloc ---------- - self.malloc_fixedsize_slowpath1 = mc.tell() + self.malloc_fixedsize_slowpath1 = self.mc.tell() if self.cpu.supports_floats: # save the XMM registers in - for i in range(8): # the *caller* frame, from esp+8 - mc.MOVSD(mem64(esp, 8+8*i), xmm_registers[i]) - mc.SUB(edx, eax) # compute the size we want - mc.MOV(mem(esp, 4), edx) # save it as the new argument + for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 + self.mc.MOVSD_sx((WORD*2)+8*i, i) + self.mc.SUB_rr(edx.value, eax.value) # compute the size we want + if IS_X86_32: + self.mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ self.mc.MOV_rr(edi.value, edx.value) + addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(rel32(addr)) # tail call to the real malloc + self.mc.JMP(imm(addr)) # tail call to the real malloc # ---------- second helper for the slow path of malloc ---------- - self.malloc_fixedsize_slowpath2 = mc.tell() + self.malloc_fixedsize_slowpath2 = self.mc.tell() if self.cpu.supports_floats: # restore the XMM registers - for i in range(8): # from where they were saved - mc.MOVSD(xmm_registers[i], mem64(esp, 8+8*i)) + for i in range(self.cpu.NUM_REGS):# from where they were saved + self.mc.MOVSD_xs(i, (WORD*2)+8*i) nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() - mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX - mc.RET() - self.mc2.done() + self.mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX + self.mc.RET() + self.mc.done() def assemble_loop(self, inputargs, operations, looptoken): """adds the following attributes to looptoken: @@ -207,15 +298,18 @@ _x86_param_depth _x86_arglocs """ + if not we_are_translated(): + # Arguments should be unique + assert len(set(inputargs)) == len(inputargs) + + self.setup() funcname = self._find_debug_merge_point(operations) - self.make_sure_mc_exists() + regalloc = RegAlloc(self, self.cpu.translate_support_code) + operations = self._inject_debugging_code(operations) arglocs = regalloc.prepare_loop(inputargs, operations, looptoken) looptoken._x86_arglocs = arglocs - needed_mem = len(arglocs[0]) * 16 + 16 - if needed_mem >= self.mc.bytes_free(): - self.mc.make_new_mc() # profile support name = "Loop # %s: %s" % (looptoken.number, funcname) @@ -230,29 +324,30 @@ self._patch_stackadjust(adr_stackadjust, frame_depth+param_depth) looptoken._x86_frame_depth = frame_depth looptoken._x86_param_depth = param_depth - # we need to make sure here that we don't overload an mc badly. - # a safe estimate is that we need at most 16 bytes per arg - needed_mem = len(arglocs[0]) * 16 + 16 - if needed_mem >= self.mc.bytes_free(): - self.mc.make_new_mc() + looptoken._x86_direct_bootstrap_code = self.mc.tell() self._assemble_bootstrap_direct_call(arglocs, curadr, frame_depth+param_depth) debug_print("Loop #", looptoken.number, "has address", looptoken._x86_loop_code, "to", self.mc.tell()) self.mc.end_function() + self.write_pending_failure_recoveries() - def assemble_bridge(self, faildescr, inputargs, operations): + if not we_are_translated(): + # Arguments should be unique + assert len(set(inputargs)) == len(inputargs) + + self.setup() funcname = self._find_debug_merge_point(operations) - self.make_sure_mc_exists() arglocs = self.rebuild_faillocs_from_descr( faildescr._x86_failure_recovery_bytecode) if not we_are_translated(): assert ([loc.assembler() for loc in arglocs] == [loc.assembler() for loc in faildescr._x86_debug_faillocs]) regalloc = RegAlloc(self, self.cpu.translate_support_code) + operations = self._inject_debugging_code(operations) fail_depths = faildescr._x86_current_depths regalloc.prepare_bridge(fail_depths, inputargs, arglocs, operations) @@ -276,25 +371,81 @@ descr_number, "has address", adr_bridge, "to", self.mc.tell()) self.mc.end_function() + self.write_pending_failure_recoveries() + + def write_pending_failure_recoveries(self): + for tok in self.pending_guard_tokens: + # Okay to write to _mc because we've already made sure that + # there's enough space by "reserving" bytes. 
+ addr = self.generate_quick_failure(self.mc._mc, tok.faildescr, tok.failargs, tok.fail_locs, tok.exc, tok.desc_bytes) + tok.faildescr._x86_adr_recovery_stub = addr + self.patch_jump_for_descr(tok.faildescr, addr) + + self.pending_guard_tokens = [] + self.mc.reset_reserved_bytes() + self.mc.done() def _find_debug_merge_point(self, operations): + for op in operations: if op.opnum == rop.DEBUG_MERGE_POINT: - return op.args[0]._get_str() - return "" + funcname = op.args[0]._get_str() + break + else: + funcname = "" % len(self.loop_run_counters) + # invent the counter, so we don't get too confused + if self._debug: + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw') + struct.i = 0 + self.loop_run_counters.append((funcname, struct)) + return funcname def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr._x86_adr_jump_offset - mc = codebuf.InMemoryCodeBuilder(adr_jump_offset, adr_jump_offset + 4) - mc.write(packimm32(adr_new_target - adr_jump_offset - 4)) + adr_recovery_stub = faildescr._x86_adr_recovery_stub + offset = adr_new_target - (adr_jump_offset + 4) + # If the new target fits within a rel32 of the jump, just patch + # that. Otherwise, leave the original rel32 to the recovery stub in + # place, but clobber the recovery stub with a jump to the real + # target. + if rx86.fits_in_32bits(offset): + mc = codebuf.InMemoryCodeBuilder(adr_jump_offset, adr_jump_offset + 4) + mc.writeimm32(offset) + else: + # "mov r11, addr; jmp r11" is 13 bytes + mc = codebuf.InMemoryCodeBuilder(adr_recovery_stub, adr_recovery_stub + 13) + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.valgrind_invalidated() mc.done() + def _inject_debugging_code(self, operations): + if self._debug: + # before doing anything, let's increase a counter + c_adr = ConstInt(rffi.cast(lltype.Signed, + self.loop_run_counters[-1][1])) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations = ops + operations + # # we need one register free (a bit of a hack, but whatever) + # self.mc.PUSH(eax) + # adr = rffi.cast(lltype.Signed, self.loop_run_counters[-1][1]) + # self.mc.MOV(eax, heap(adr)) + # self.mc.ADD(eax, imm(1)) + # self.mc.MOV(heap(adr), eax) + # self.mc.POP(eax) + return operations + def _assemble(self, regalloc, operations): self._regalloc = regalloc regalloc.walk_operations(operations) self.mc.done() - self.mc2.done() if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging frame_depth = regalloc.fm.frame_depth @@ -309,7 +460,7 @@ def _patchable_stackadjust(self): # stack adjustment LEA - self.mc.LEA(esp, fixedsize_ebp_ofs(0)) + self.mc.LEA32_rb(esp.value, 0) return self.mc.tell() - 4 def _patch_stackadjust(self, adr_lea, reserved_depth): @@ -318,23 +469,34 @@ # Compute the correct offset for the instruction LEA ESP, [EBP-4*words]. # Given that [EBP] is where we saved EBP, i.e. in the last word # of our fixed frame, then the 'words' value is: - words = (FRAME_FIXED_SIZE - 1) + reserved_depth + words = (self.cpu.FRAME_FIXED_SIZE - 1) + reserved_depth # align, e.g. 
for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP - mc.write(packimm32(-WORD * aligned_words)) + mc.writeimm32(-WORD * aligned_words) mc.done() def _call_header(self): - self.mc.PUSH(ebp) - self.mc.MOV(ebp, esp) - self.mc.PUSH(ebx) - self.mc.PUSH(esi) - self.mc.PUSH(edi) + self.mc.PUSH_r(ebp.value) + self.mc.MOV_rr(ebp.value, esp.value) + for regloc in self.cpu.CALLEE_SAVE_REGISTERS: + self.mc.PUSH_r(regloc.value) + # NB. the shape of the frame is hard-coded in get_basic_shape() too. # Also, make sure this is consistent with FRAME_FIXED_SIZE. return self._patchable_stackadjust() + def _call_footer(self): + self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): + self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) + + self.mc.POP_r(ebp.value) + self.mc.RET() + def _assemble_bootstrap_direct_call(self, arglocs, jmpadr, stackdepth): + if IS_X86_64: + return self._assemble_bootstrap_direct_call_64(arglocs, jmpadr, stackdepth) # XXX pushing ebx esi and edi is a bit pointless, since we store # all regsiters anyway, for the case of guard_not_forced # XXX this can be improved greatly. Right now it'll behave like @@ -345,23 +507,81 @@ self._patch_stackadjust(adr_stackadjust, stackdepth) for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] - if isinstance(loc, REG): - self.mc.MOV(loc, mem(ebp, (2 + i) * WORD)) + if isinstance(loc, RegLoc): + assert not loc.is_xmm + self.mc.MOV_rb(loc.value, (2 + i) * WORD) loc = floatlocs[i] - if isinstance(loc, XMMREG): - self.mc.MOVSD(loc, mem64(ebp, (1 + i) * 2 * WORD)) + if isinstance(loc, RegLoc): + assert loc.is_xmm + self.mc.MOVSD_xb(loc.value, (1 + i) * 2 * WORD) tmp = eax xmmtmp = xmm0 for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] - if loc is not None and not isinstance(loc, REG): - self.mc.MOV(tmp, mem(ebp, (2 + i) * WORD)) + if loc is not None and not isinstance(loc, RegLoc): + self.mc.MOV_rb(tmp.value, (2 + i) * WORD) self.mc.MOV(loc, tmp) loc = floatlocs[i] - if loc is not None and not isinstance(loc, XMMREG): - self.mc.MOVSD(xmmtmp, mem64(ebp, (1 + i) * 2 * WORD)) - self.mc.MOVSD(loc, xmmtmp) - self.mc.JMP(rel32(jmpadr)) + if loc is not None and not isinstance(loc, RegLoc): + self.mc.MOVSD_xb(xmmtmp.value, (1 + i) * 2 * WORD) + assert isinstance(loc, StackLoc) + self.mc.MOVSD_bx(loc.value, xmmtmp.value) + self.mc.JMP_l(jmpadr) + return adr_stackadjust + + def _assemble_bootstrap_direct_call_64(self, arglocs, jmpadr, stackdepth): + # XXX: Very similar to _emit_call_64 + + src_locs = [] + dst_locs = [] + xmm_src_locs = [] + xmm_dst_locs = [] + get_from_stack = [] + + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + + nonfloatlocs, floatlocs = arglocs + adr_stackadjust = self._call_header() + self._patch_stackadjust(adr_stackadjust, stackdepth) + + # The lists are padded with Nones + assert len(nonfloatlocs) == len(floatlocs) + + for i in range(len(nonfloatlocs)): + loc = nonfloatlocs[i] + if loc is not None: + if len(unused_gpr) > 0: + src_locs.append(unused_gpr.pop()) + dst_locs.append(loc) + else: + get_from_stack.append((loc, False)) + + floc = floatlocs[i] + if floc is not None: + if len(unused_xmm) > 0: + xmm_src_locs.append(unused_xmm.pop()) + xmm_dst_locs.append(floc) + else: + get_from_stack.append((floc, True)) + + remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) + remap_frame_layout(self, xmm_src_locs, 
xmm_dst_locs, X86_64_XMM_SCRATCH_REG) + + for i in range(len(get_from_stack)): + loc, is_xmm = get_from_stack[i] + if is_xmm: + self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) + self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) + else: + self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) + # XXX: We're assuming that "loc" won't require regloc to + # clobber the scratch register + self.mc.MOV(loc, X86_64_SCRATCH_REG) + + self.mc.JMP(imm(jmpadr)) + return adr_stackadjust def _assemble_bootstrap_code(self, inputargs, arglocs): @@ -369,11 +589,12 @@ adr_stackadjust = self._call_header() tmp = X86RegisterManager.all_regs[0] xmmtmp = X86XMMRegisterManager.all_regs[0] + self.mc._mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] if loc is None: continue - if isinstance(loc, REG): + if isinstance(loc, RegLoc): target = loc else: target = tmp @@ -387,17 +608,20 @@ adr = self.fail_boxes_int.get_addr_for_num(i) self.mc.MOV(target, heap(adr)) if target is not loc: - self.mc.MOV(loc, target) + assert isinstance(loc, StackLoc) + self.mc.MOV_br(loc.value, target.value) for i in range(len(floatlocs)): loc = floatlocs[i] if loc is None: continue adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, REG): - self.mc.MOVSD(loc, heap64(adr)) + if isinstance(loc, RegLoc): + self.mc.MOVSD(loc, heap(adr)) else: - self.mc.MOVSD(xmmtmp, heap64(adr)) - self.mc.MOVSD(loc, xmmtmp) + self.mc.MOVSD(xmmtmp, heap(adr)) + assert isinstance(loc, StackLoc) + self.mc.MOVSD_bx(loc.value, xmmtmp.value) + self.mc._mc.end_reuse_scratch_register() return adr_stackadjust def dump(self, text): @@ -410,27 +634,10 @@ finally: Box._extended_display = _prev - def _start_block(self): - # Return a 'mc' that can be used to write an "atomic" block, - # i.e. one that will not contain any JMP. 
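
An illustrative sketch (not part of this patch) of the register-assignment scheme used by _assemble_bootstrap_direct_call_64 above, and again by _emit_call_64 further down: under the System V AMD64 calling convention the first six integer arguments travel in rdi, rsi, rdx, rcx, r8, r9, float arguments in xmm0-xmm7, and anything left over is passed on the stack. The register lists are stored reversed so that pop() hands them out in ABI order.

    def assign_call_locations(arg_kinds):
        # arg_kinds: a list of "int" / "float" tags, one per argument
        unused_gpr = ["r9", "r8", "rcx", "rdx", "rsi", "rdi"]  # reversed ABI order
        unused_xmm = ["xmm%d" % i for i in range(7, -1, -1)]   # xmm7 .. xmm0
        in_registers, on_stack = [], []
        for i, kind in enumerate(arg_kinds):
            pool = unused_xmm if kind == "float" else unused_gpr
            if pool:
                in_registers.append((i, pool.pop()))
            else:
                on_stack.append(i)
        return in_registers, on_stack

    regs, stack = assign_call_locations(["int"] * 7 + ["float"] * 2)
    assert regs[0] == (0, "rdi") and regs[5] == (5, "r9")
    assert regs[6] == (7, "xmm0") and regs[7] == (8, "xmm1")
    assert stack == [6]    # the seventh integer argument is passed on the stack
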
- mc = self.mc._mc - if not we_are_translated(): - self._block_started_mc = (self.mc, mc.tell()) - self.mc = "block started" - return mc - - def _stop_block(self): - if not we_are_translated(): - assert self.mc == "block started" - self.mc, orgpos = self._block_started_mc - assert 0 <= self.mc._mc.tell() - orgpos <= 58, ( - "too many bytes in _start_block/_stop_block pair") - del self._block_started_mc - # ------------------------------------------------------------ def mov(self, from_loc, to_loc): - if isinstance(from_loc, XMMREG) or isinstance(to_loc, XMMREG): + if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm): self.mc.MOVSD(to_loc, from_loc) else: self.mc.MOV(to_loc, from_loc) @@ -438,24 +645,24 @@ regalloc_mov = mov # legacy interface def regalloc_push(self, loc): - if isinstance(loc, XMMREG): - self.mc.SUB(esp, imm(2*WORD)) - self.mc.MOVSD(mem64(esp, 0), loc) - elif isinstance(loc, MODRM64): + if isinstance(loc, RegLoc) and loc.is_xmm: + self.mc.SUB_ri(esp.value, 2*WORD) + self.mc.MOVSD_sx(0, loc.value) + elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: # XXX evil trick - self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position))) - self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position + 1))) + self.mc.PUSH_b(get_ebp_ofs(loc.position)) + self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) else: self.mc.PUSH(loc) def regalloc_pop(self, loc): - if isinstance(loc, XMMREG): - self.mc.MOVSD(loc, mem64(esp, 0)) - self.mc.ADD(esp, imm(2*WORD)) - elif isinstance(loc, MODRM64): + if isinstance(loc, RegLoc) and loc.is_xmm: + self.mc.MOVSD_xs(loc.value, 0) + self.mc.ADD_ri(esp.value, 2*WORD) + elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: # XXX evil trick - self.mc.POP(mem(ebp, get_ebp_ofs(loc.position + 1))) - self.mc.POP(mem(ebp, get_ebp_ofs(loc.position))) + self.mc.POP_b(get_ebp_ofs(loc.position + 1)) + self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) @@ -472,14 +679,14 @@ faildescr._x86_current_depths = current_depths failargs = guard_op.fail_args guard_opnum = guard_op.opnum - failaddr = self.implement_guard_recovery(guard_opnum, - faildescr, failargs, - faillocs) + guard_token = self.implement_guard_recovery(guard_opnum, + faildescr, failargs, + faillocs) if op is None: dispatch_opnum = guard_opnum else: dispatch_opnum = op.opnum - res = genop_guard_list[dispatch_opnum](self, op, guard_op, failaddr, + res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token, arglocs, resloc) faildescr._x86_adr_jump_offset = res @@ -506,103 +713,161 @@ rl = result_loc.lowest8bits() if isinstance(op.args[0], Const): self.mc.CMP(arglocs[1], arglocs[0]) - getattr(self.mc, 'SET' + rev_cond)(rl) + self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value) else: self.mc.CMP(arglocs[0], arglocs[1]) - getattr(self.mc, 'SET' + cond)(rl) - self.mc.MOVZX(result_loc, rl) + self.mc.SET_ir(rx86.Conditions[cond], rl.value) + self.mc.MOVZX8_rr(result_loc.value, rl.value) return genop_cmp def _cmpop_float(cond, is_ne=False): def genop_cmp(self, op, arglocs, result_loc): self.mc.UCOMISD(arglocs[0], arglocs[1]) - rl = result_loc.lowest8bits() - rh = result_loc.higher8bits() - getattr(self.mc, 'SET' + cond)(rl) + tmp1 = result_loc.lowest8bits() + if IS_X86_32: + tmp2 = result_loc.higher8bits() + elif IS_X86_64: + tmp2 = X86_64_SCRATCH_REG.lowest8bits() + + self.mc.SET_ir(rx86.Conditions[cond], tmp1.value) if is_ne: - self.mc.SETP(rh) - self.mc.OR(rl, rh) + self.mc.SET_ir(rx86.Conditions['P'], tmp2.value) + self.mc.OR8_rr(tmp1.value, 
tmp2.value) else: - self.mc.SETNP(rh) - self.mc.AND(rl, rh) - self.mc.MOVZX(result_loc, rl) + self.mc.SET_ir(rx86.Conditions['NP'], tmp2.value) + self.mc.AND8_rr(tmp1.value, tmp2.value) + self.mc.MOVZX8_rr(result_loc.value, tmp1.value) return genop_cmp def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): - def genop_cmp_guard(self, op, guard_op, addr, arglocs, result_loc): + def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): guard_opnum = guard_op.opnum if isinstance(op.args[0], Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: - name = 'J' + rev_cond - return self.implement_guard(addr, getattr(self.mc, name)) + return self.implement_guard(guard_token, rev_cond) else: - name = 'J' + false_rev_cond - return self.implement_guard(addr, getattr(self.mc, name)) + return self.implement_guard(guard_token, false_rev_cond) else: self.mc.CMP(arglocs[0], arglocs[1]) if guard_opnum == rop.GUARD_FALSE: - name = 'J' + cond - return self.implement_guard(addr, getattr(self.mc, name)) + return self.implement_guard(guard_token, cond) else: - name = 'J' + false_cond - return self.implement_guard(addr, getattr(self.mc, name)) + return self.implement_guard(guard_token, false_cond) return genop_cmp_guard def _cmpop_guard_float(cond, false_cond, need_jp): - def genop_cmp_guard_float(self, op, guard_op, addr, arglocs, + def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs, result_loc): guard_opnum = guard_op.opnum self.mc.UCOMISD(arglocs[0], arglocs[1]) + # 16 is enough space for the rel8 jumps below and the rel32 + # jump in implement_guard + self.mc.ensure_bytes_available(16 + guard_token.recovery_stub_size()) if guard_opnum == rop.GUARD_FALSE: - mc = self.mc._mc - name = 'J' + cond if need_jp: - mc.JP(rel8(6)) - getattr(mc, name)(rel32(addr)) - return mc.tell() - 4 + self.mc.J_il8(rx86.Conditions['P'], 6) + return self.implement_guard(guard_token, cond) else: if need_jp: - mc = self.mc._mc - mc.JP(rel8(2)) - getattr(mc, 'J' + cond)(rel8(5)) - return self.implement_guard(addr, mc.JMP) - name = 'J' + false_cond - return self.implement_guard(addr, getattr(self.mc, name)) + self.mc.J_il8(rx86.Conditions['P'], 2) + self.mc.J_il8(rx86.Conditions[cond], 5) + return self.implement_guard(guard_token) + return self.implement_guard(guard_token, false_cond) return genop_cmp_guard_float - @specialize.arg(5) - def _emit_call(self, x, arglocs, start=0, tmp=eax, force_mc=False, - mc=None): - if not force_mc: - mc = self.mc + def _emit_call(self, x, arglocs, start=0, tmp=eax): + if IS_X86_64: + return self._emit_call_64(x, arglocs, start) + p = 0 n = len(arglocs) for i in range(start, n): loc = arglocs[i] - if isinstance(loc, REG): - if isinstance(loc, XMMREG): - mc.MOVSD(mem64(esp, p), loc) + if isinstance(loc, RegLoc): + if loc.is_xmm: + self.mc.MOVSD_sx(p, loc.value) else: - mc.MOV(mem(esp, p), loc) + self.mc.MOV_sr(p, loc.value) p += round_up_to_4(loc.width) p = 0 for i in range(start, n): loc = arglocs[i] - if not isinstance(loc, REG): - if isinstance(loc, MODRM64): - mc.MOVSD(xmm0, loc) - mc.MOVSD(mem64(esp, p), xmm0) + if not isinstance(loc, RegLoc): + if loc.width == 8: + self.mc.MOVSD(xmm0, loc) + self.mc.MOVSD_sx(p, xmm0.value) else: - mc.MOV(tmp, loc) - mc.MOV(mem(esp, p), tmp) + self.mc.MOV(tmp, loc) + self.mc.MOV_sr(p, tmp.value) p += round_up_to_4(loc.width) self._regalloc.reserve_param(p//WORD) - mc.CALL(x) + # x is a location + self.mc.CALL(x) self.mark_gc_roots() + + def _emit_call_64(self, x, arglocs, start=0): + src_locs = [] + 
dst_locs = [] + xmm_src_locs = [] + xmm_dst_locs = [] + pass_on_stack = [] + + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + + for i in range(start, len(arglocs)): + loc = arglocs[i] + # XXX: Should be much simplier to tell whether a location is a + # float! It's so ugly because we have to "guard" the access to + # .type with isinstance, since not all AssemblerLocation classes + # are "typed" + if ((isinstance(loc, RegLoc) and loc.is_xmm) or + (isinstance(loc, StackLoc) and loc.type == FLOAT) or + (isinstance(loc, ConstFloatLoc))): + if len(unused_xmm) > 0: + xmm_src_locs.append(loc) + xmm_dst_locs.append(unused_xmm.pop()) + else: + pass_on_stack.append(loc) + else: + if len(unused_gpr) > 0: + src_locs.append(loc) + dst_locs.append(unused_gpr.pop()) + else: + pass_on_stack.append(loc) + # Emit instructions to pass the stack arguments + # XXX: Would be nice to let remap_frame_layout take care of this, but + # we'd need to create something like StackLoc, but relative to esp, + # and I don't know if it's worth it. + for i in range(len(pass_on_stack)): + loc = pass_on_stack[i] + if not isinstance(loc, RegLoc): + if isinstance(loc, StackLoc) and loc.type == FLOAT: + self.mc.MOVSD(X86_64_XMM_SCRATCH_REG, loc) + self.mc.MOVSD_sx(i*WORD, X86_64_XMM_SCRATCH_REG.value) + else: + self.mc.MOV(X86_64_SCRATCH_REG, loc) + self.mc.MOV_sr(i*WORD, X86_64_SCRATCH_REG.value) + else: + # It's a register + if loc.is_xmm: + self.mc.MOVSD_sx(i*WORD, loc.value) + else: + self.mc.MOV_sr(i*WORD, loc.value) + + # Handle register arguments + remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) + remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) + + self._regalloc.reserve_param(len(pass_on_stack)) + self.mc.CALL(x) + self.mark_gc_roots() + def call(self, addr, args, res): - self._emit_call(rel32(addr), args) + self._emit_call(imm(addr), args) assert res is eax genop_int_neg = _unaryop("NEG") @@ -613,6 +878,9 @@ genop_int_and = _binaryop("AND", True) genop_int_or = _binaryop("OR", True) genop_int_xor = _binaryop("XOR", True) + genop_int_lshift = _binaryop("SHL") + genop_int_rshift = _binaryop("SAR") + genop_uint_rshift = _binaryop("SHR") genop_float_add = _binaryop("ADDSD", True) genop_float_sub = _binaryop('SUBSD') genop_float_mul = _binaryop('MULSD', True) @@ -659,26 +927,27 @@ genop_guard_float_gt = _cmpop_guard_float("A", "BE", False) genop_guard_float_ge = _cmpop_guard_float("AE", "B", False) - def genop_guard_float_ne(self, op, guard_op, addr, arglocs, result_loc): + def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc): guard_opnum = guard_op.opnum self.mc.UCOMISD(arglocs[0], arglocs[1]) - mc = self.mc._mc + # 16 is enough space for the rel8 jumps below and the rel32 + # jump in implement_guard + self.mc.ensure_bytes_available(16 + guard_token.recovery_stub_size()) if guard_opnum == rop.GUARD_TRUE: - mc.JP(rel8(6)) - mc.JE(rel32(addr)) - return mc.tell() - 4 - else: - mc.JP(rel8(2)) - mc.JE(rel8(5)) - return self.implement_guard(addr, mc.JMP) + self.mc.J_il8(rx86.Conditions['P'], 6) + return self.implement_guard(guard_token, 'E') + else: + self.mc.J_il8(rx86.Conditions['P'], 2) + self.mc.J_il8(rx86.Conditions['E'], 5) + return self.implement_guard(guard_token) def genop_float_neg(self, op, arglocs, resloc): # Following what gcc does: res = x ^ 0x8000000000000000 - self.mc.XORPD(arglocs[0], self.loc_float_const_neg) + self.mc.XORPD(arglocs[0], 
heap(self.float_const_neg_addr)) def genop_float_abs(self, op, arglocs, resloc): # Following what gcc does: res = x & 0x7FFFFFFFFFFFFFFF - self.mc.ANDPD(arglocs[0], self.loc_float_const_abs) + self.mc.ANDPD(arglocs[0], heap(self.float_const_abs_addr)) def genop_cast_float_to_int(self, op, arglocs, resloc): self.mc.CVTTSD2SI(resloc, arglocs[0]) @@ -686,70 +955,56 @@ def genop_cast_int_to_float(self, op, arglocs, resloc): self.mc.CVTSI2SD(resloc, arglocs[0]) - def genop_int_lshift(self, op, arglocs, resloc): - loc, loc2 = arglocs - if loc2 is ecx: - loc2 = cl - self.mc.SHL(loc, loc2) - - def genop_int_rshift(self, op, arglocs, resloc): - loc, loc2 = arglocs - if loc2 is ecx: - loc2 = cl - self.mc.SAR(loc, loc2) - - def genop_uint_rshift(self, op, arglocs, resloc): - loc, loc2 = arglocs - if loc2 is ecx: - loc2 = cl - self.mc.SHR(loc, loc2) - - def genop_guard_int_is_true(self, op, guard_op, addr, arglocs, resloc): + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.opnum - self.mc.CMP(arglocs[0], imm8(0)) + self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: - return self.implement_guard(addr, self.mc.JZ) + return self.implement_guard(guard_token, 'Z') else: - return self.implement_guard(addr, self.mc.JNZ) + return self.implement_guard(guard_token, 'NZ') def genop_int_is_true(self, op, arglocs, resloc): - self.mc.CMP(arglocs[0], imm8(0)) + self.mc.CMP(arglocs[0], imm(0)) rl = resloc.lowest8bits() - self.mc.SETNE(rl) - self.mc.MOVZX(resloc, rl) + self.mc.SET_ir(rx86.Conditions['NE'], rl.value) + self.mc.MOVZX8(resloc, rl) - def genop_guard_int_is_zero(self, op, guard_op, addr, arglocs, resloc): + def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.opnum - self.mc.CMP(arglocs[0], imm8(0)) + self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: - return self.implement_guard(addr, self.mc.JNZ) + return self.implement_guard(guard_token, 'NZ') else: - return self.implement_guard(addr, self.mc.JZ) + return self.implement_guard(guard_token, 'Z') def genop_int_is_zero(self, op, arglocs, resloc): - self.mc.CMP(arglocs[0], imm8(0)) + self.mc.CMP(arglocs[0], imm(0)) rl = resloc.lowest8bits() - self.mc.SETE(rl) - self.mc.MOVZX(resloc, rl) + self.mc.SET_ir(rx86.Conditions['E'], rl.value) + self.mc.MOVZX8(resloc, rl) def genop_same_as(self, op, arglocs, resloc): self.mov(arglocs[0], resloc) #genop_cast_ptr_to_int = genop_same_as def genop_int_mod(self, op, arglocs, resloc): - self.mc.CDQ() - self.mc.IDIV(ecx) + if IS_X86_32: + self.mc.CDQ() + elif IS_X86_64: + self.mc.CQO() + + self.mc.IDIV_r(ecx.value) genop_int_floordiv = genop_int_mod def genop_uint_floordiv(self, op, arglocs, resloc): - self.mc.XOR(edx, edx) - self.mc.DIV(ecx) + self.mc.XOR_rr(edx.value, edx.value) + self.mc.DIV_r(ecx.value) def genop_new_with_vtable(self, op, arglocs, result_loc): assert result_loc is eax loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, IMM32) + assert isinstance(loc_vtable, ImmedLoc) arglocs = arglocs[:-1] self.call(self.malloc_func_addr, arglocs, eax) # xxx ignore NULL returns for now @@ -757,7 +1012,9 @@ def set_vtable(self, loc, loc_vtable): if self.cpu.vtable_offset is not None: - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + assert isinstance(loc, RegLoc) + assert isinstance(loc_vtable, ImmedLoc) + self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value) # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of 
genop_new_array, genop_newstr, genop_newunicode) @@ -779,16 +1036,22 @@ def genop_getfield_gc(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc = arglocs - assert isinstance(size_loc, IMM32) + assert isinstance(size_loc, ImmedLoc) + assert isinstance(resloc, RegLoc) size = size_loc.value - if size == 1: - self.mc.MOVZX(resloc, addr8_add(base_loc, ofs_loc)) + + source_addr = AddressLoc(base_loc, ofs_loc) + if resloc.is_xmm: + self.mc.MOVSD(resloc, source_addr) + elif size == 1: + self.mc.MOVZX8(resloc, source_addr) elif size == 2: - self.mc.MOVZX(resloc, addr_add(base_loc, ofs_loc)) - elif size == WORD: - self.mc.MOV(resloc, addr_add(base_loc, ofs_loc)) - elif size == 8: - self.mc.MOVSD(resloc, addr64_add(base_loc, ofs_loc)) + self.mc.MOVZX16(resloc, source_addr) + elif size == 4: + # MOV32 is zero-extending on 64-bit, so this is okay + self.mc.MOV32(resloc, source_addr) + elif IS_X86_64 and size == 8: + self.mc.MOV(resloc, source_addr) else: raise NotImplementedError("getfield size = %d" % size) @@ -798,20 +1061,22 @@ def genop_getarrayitem_gc(self, op, arglocs, resloc): base_loc, ofs_loc, scale, ofs = arglocs - assert isinstance(ofs, IMM32) - assert isinstance(scale, IMM32) + assert isinstance(ofs, ImmedLoc) + assert isinstance(scale, ImmedLoc) + src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale.value) if op.result.type == FLOAT: - self.mc.MOVSD(resloc, addr64_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOVSD(resloc, src_addr) else: if scale.value == 0: - self.mc.MOVZX(resloc, addr8_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOVZX8(resloc, src_addr) + elif scale.value == 1: + self.mc.MOVZX16(resloc, src_addr) elif scale.value == 2: - self.mc.MOV(resloc, addr_add(base_loc, ofs_loc, ofs.value, - scale.value)) + self.mc.MOV32(resloc, src_addr) + elif IS_X86_64 and scale.value == 3: + self.mc.MOV(resloc, src_addr) else: - print "[asmgen]setarrayitem unsupported size: %d" % scale.value + print "[asmgen]getarrayitem unsupported size: %d" % scale.value raise NotImplementedError() genop_getarrayitem_gc_pure = genop_getarrayitem_gc @@ -819,34 +1084,39 @@ def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs - assert isinstance(size_loc, IMM32) + assert isinstance(size_loc, ImmedLoc) size = size_loc.value - if size == WORD * 2: - self.mc.MOVSD(addr64_add(base_loc, ofs_loc), value_loc) - elif size == WORD: - self.mc.MOV(addr_add(base_loc, ofs_loc), value_loc) + dest_addr = AddressLoc(base_loc, ofs_loc) + if isinstance(value_loc, RegLoc) and value_loc.is_xmm: + self.mc.MOVSD(dest_addr, value_loc) + elif IS_X86_64 and size == 8: + self.mc.MOV(dest_addr, value_loc) + elif size == 4: + self.mc.MOV32(dest_addr, value_loc) elif size == 2: - self.mc.MOV16(addr_add(base_loc, ofs_loc), value_loc) + self.mc.MOV16(dest_addr, value_loc) elif size == 1: - self.mc.MOV(addr8_add(base_loc, ofs_loc), value_loc.lowest8bits()) + self.mc.MOV8(dest_addr, value_loc.lowest8bits()) else: print "[asmgen]setfield addr size %d" % size raise NotImplementedError("Addr size %d" % size) def genop_discard_setarrayitem_gc(self, op, arglocs): base_loc, ofs_loc, value_loc, scale_loc, baseofs = arglocs - assert isinstance(baseofs, IMM32) - assert isinstance(scale_loc, IMM32) + assert isinstance(baseofs, ImmedLoc) + assert isinstance(scale_loc, ImmedLoc) + dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value) if op.args[2].type == FLOAT: - self.mc.MOVSD(addr64_add(base_loc, ofs_loc, baseofs.value, - scale_loc.value), value_loc) 
+ self.mc.MOVSD(dest_addr, value_loc) else: - if scale_loc.value == 2: - self.mc.MOV(addr_add(base_loc, ofs_loc, baseofs.value, - scale_loc.value), value_loc) + if IS_X86_64 and scale_loc.value == 3: + self.mc.MOV(dest_addr, value_loc) + elif scale_loc.value == 2: + self.mc.MOV32(dest_addr, value_loc) + elif scale_loc.value == 1: + self.mc.MOV16(dest_addr, value_loc) elif scale_loc.value == 0: - self.mc.MOV(addr8_add(base_loc, ofs_loc, baseofs.value, - scale_loc.value), value_loc.lowest8bits()) + self.mc.MOV8(dest_addr, value_loc.lowest8bits()) else: raise NotImplementedError("scale = %d" % scale_loc.value) @@ -855,17 +1125,17 @@ basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 - self.mc.MOV(addr8_add(base_loc, ofs_loc, basesize), - val_loc.lowest8bits()) + dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize) + self.mc.MOV8(dest_addr, val_loc.lowest8bits()) def genop_discard_unicodesetitem(self, op, arglocs): base_loc, ofs_loc, val_loc = arglocs basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) if itemsize == 4: - self.mc.MOV(addr_add(base_loc, ofs_loc, basesize, 2), val_loc) + self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc) elif itemsize == 2: - self.mc.MOV16(addr_add(base_loc, ofs_loc, basesize, 1), val_loc) + self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc) else: assert 0, itemsize @@ -886,7 +1156,7 @@ def genop_arraylen_gc(self, op, arglocs, resloc): base_loc, ofs_loc = arglocs - assert isinstance(ofs_loc, IMM32) + assert isinstance(ofs_loc, ImmedLoc) self.mc.MOV(resloc, addr_add_const(base_loc, ofs_loc.value)) def genop_strgetitem(self, op, arglocs, resloc): @@ -894,83 +1164,83 @@ basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 - self.mc.MOVZX(resloc, addr8_add(base_loc, ofs_loc, basesize)) + self.mc.MOVZX8(resloc, AddressLoc(base_loc, ofs_loc, 0, basesize)) def genop_unicodegetitem(self, op, arglocs, resloc): base_loc, ofs_loc = arglocs basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) if itemsize == 4: - self.mc.MOV(resloc, addr_add(base_loc, ofs_loc, basesize, 2)) + self.mc.MOV32(resloc, AddressLoc(base_loc, ofs_loc, 2, basesize)) elif itemsize == 2: - self.mc.MOVZX(resloc, addr_add(base_loc, ofs_loc, basesize, 1)) + self.mc.MOVZX16(resloc, AddressLoc(base_loc, ofs_loc, 1, basesize)) else: assert 0, itemsize - def genop_guard_guard_true(self, ign_1, guard_op, addr, locs, ign_2): + def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2): loc = locs[0] self.mc.TEST(loc, loc) - return self.implement_guard(addr, self.mc.JZ) + return self.implement_guard(guard_token, 'Z') genop_guard_guard_nonnull = genop_guard_guard_true - def genop_guard_guard_no_exception(self, ign_1, guard_op, addr, + def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): self.mc.CMP(heap(self.cpu.pos_exception()), imm(0)) - return self.implement_guard(addr, self.mc.JNZ) + return self.implement_guard(guard_token, 'NZ') - def genop_guard_guard_exception(self, ign_1, guard_op, addr, + def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] loc1 = locs[1] self.mc.MOV(loc1, heap(self.cpu.pos_exception())) self.mc.CMP(loc1, loc) - addr = self.implement_guard(addr, self.mc.JNE) + addr = self.implement_guard(guard_token, 'NE') if resloc is not 
None: self.mc.MOV(resloc, heap(self.cpu.pos_exc_value())) self.mc.MOV(heap(self.cpu.pos_exception()), imm(0)) self.mc.MOV(heap(self.cpu.pos_exc_value()), imm(0)) return addr - def _gen_guard_overflow(self, guard_op, addr): + def _gen_guard_overflow(self, guard_op, guard_token): guard_opnum = guard_op.opnum if guard_opnum == rop.GUARD_NO_OVERFLOW: - return self.implement_guard(addr, self.mc.JO) + return self.implement_guard(guard_token, 'O') elif guard_opnum == rop.GUARD_OVERFLOW: - return self.implement_guard(addr, self.mc.JNO) + return self.implement_guard(guard_token, 'NO') else: print "int_xxx_ovf followed by", guard_op.getopname() raise AssertionError - def genop_guard_int_add_ovf(self, op, guard_op, addr, arglocs, result_loc): + def genop_guard_int_add_ovf(self, op, guard_op, guard_token, arglocs, result_loc): self.genop_int_add(op, arglocs, result_loc) - return self._gen_guard_overflow(guard_op, addr) + return self._gen_guard_overflow(guard_op, guard_token) - def genop_guard_int_sub_ovf(self, op, guard_op, addr, arglocs, result_loc): + def genop_guard_int_sub_ovf(self, op, guard_op, guard_token, arglocs, result_loc): self.genop_int_sub(op, arglocs, result_loc) - return self._gen_guard_overflow(guard_op, addr) + return self._gen_guard_overflow(guard_op, guard_token) - def genop_guard_int_mul_ovf(self, op, guard_op, addr, arglocs, result_loc): + def genop_guard_int_mul_ovf(self, op, guard_op, guard_token, arglocs, result_loc): self.genop_int_mul(op, arglocs, result_loc) - return self._gen_guard_overflow(guard_op, addr) + return self._gen_guard_overflow(guard_op, guard_token) - def genop_guard_guard_false(self, ign_1, guard_op, addr, locs, ign_2): + def genop_guard_guard_false(self, ign_1, guard_op, guard_token, locs, ign_2): loc = locs[0] self.mc.TEST(loc, loc) - return self.implement_guard(addr, self.mc.JNZ) + return self.implement_guard(guard_token, 'NZ') genop_guard_guard_isnull = genop_guard_guard_false - def genop_guard_guard_value(self, ign_1, guard_op, addr, locs, ign_2): + def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): if guard_op.args[0].type == FLOAT: assert guard_op.args[1].type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: self.mc.CMP(locs[0], locs[1]) - return self.implement_guard(addr, self.mc.JNE) + return self.implement_guard(guard_token, 'NE') - def _cmp_guard_class(self, mc, locs): + def _cmp_guard_class(self, locs): offset = self.cpu.vtable_offset if offset is not None: - mc.CMP(mem(locs[0], offset), locs[1]) + self.mc.CMP(mem(locs[0], offset), locs[1]) else: # XXX hard-coded assumption: to go from an object to its class # we use the following algorithm: @@ -979,7 +1249,7 @@ # - multiply by 4 and use it as an offset in type_info_group # - add 16 bytes, to go past the TYPE_INFO structure loc = locs[1] - assert isinstance(loc, IMM32) + assert isinstance(loc, ImmedLoc) classptr = loc.value # here, we have to go back from 'classptr' to the value expected # from reading the 16 bits in the object header @@ -987,38 +1257,42 @@ sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) type_info_group = llop.gc_get_type_info_group(llmemory.Address) type_info_group = rffi.cast(lltype.Signed, type_info_group) - expected_typeid = (classptr - sizeof_ti - type_info_group) >> 2 - mc.CMP16(mem(locs[0], 0), imm32(expected_typeid)) - - def genop_guard_guard_class(self, ign_1, guard_op, addr, locs, ign_2): - mc = self._start_block() - self._cmp_guard_class(mc, locs) - self._stop_block() - return self.implement_guard(addr, self.mc.JNE) + expected_typeid = classptr - 
sizeof_ti - type_info_group + if IS_X86_32: + expected_typeid >>= 2 + self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + elif IS_X86_64: + self.mc.CMP32_mi((locs[0].value, 0), expected_typeid) + + def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs, ign_2): + self.mc.ensure_bytes_available(256) + self._cmp_guard_class(locs) + return self.implement_guard(guard_token, 'NE') def genop_guard_guard_nonnull_class(self, ign_1, guard_op, - addr, locs, ign_2): - mc = self._start_block() - mc.CMP(locs[0], imm8(1)) - mc.JB(rel8_patched_later) - jb_location = mc.get_relative_pos() - self._cmp_guard_class(mc, locs) + guard_token, locs, ign_2): + self.mc.ensure_bytes_available(256) + self.mc.CMP(locs[0], imm(1)) + # Patched below + self.mc.J_il8(rx86.Conditions['B'], 0) + jb_location = self.mc.get_relative_pos() + self._cmp_guard_class(locs) # patch the JB above - offset = mc.get_relative_pos() - jb_location + offset = self.mc.get_relative_pos() - jb_location assert 0 < offset <= 127 - mc.overwrite(jb_location-1, [chr(offset)]) - self._stop_block() + self.mc.overwrite(jb_location-1, [chr(offset)]) # - return self.implement_guard(addr, self.mc.JNE) + return self.implement_guard(guard_token, 'NE') def implement_guard_recovery(self, guard_opnum, faildescr, failargs, fail_locs): exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return self.generate_quick_failure(faildescr, failargs, fail_locs, exc) + desc_bytes = self.failure_recovery_description(failargs, fail_locs) + return GuardToken(faildescr, failargs, fail_locs, exc, desc_bytes) - def generate_quick_failure(self, faildescr, failargs, fail_locs, exc): + def generate_quick_failure(self, mc, faildescr, failargs, fail_locs, exc, desc_bytes): """Generate the initial code for handling a failure. We try to keep it as compact as possible. The idea is that this code is executed at most once (and very often, zero times); when @@ -1026,38 +1300,43 @@ really handle recovery from this particular failure. """ fail_index = self.cpu.get_fail_descr_number(faildescr) - bytes_needed = 20 + 5 * len(failargs) # conservative estimate - if self.mc2.bytes_free() < bytes_needed: - self.mc2.make_new_mc() - mc = self.mc2._mc addr = mc.tell() withfloats = False for box in failargs: if box is not None and box.type == FLOAT: withfloats = True break - mc.CALL(rel32(self.failure_recovery_code[exc + 2 * withfloats])) + mc.CALL(imm(self.failure_recovery_code[exc + 2 * withfloats])) # write tight data that describes the failure recovery faildescr._x86_failure_recovery_bytecode = mc.tell() - self.write_failure_recovery_description(mc, failargs, fail_locs) + for byte in desc_bytes: + mc.writechr(ord(byte)) # write the fail_index too - mc.write(packimm32(fail_index)) + mc.writeimm32(fail_index) # for testing the decoding, write a final byte 0xCC if not we_are_translated(): mc.writechr(0xCC) faildescr._x86_debug_faillocs = [loc for loc in fail_locs if loc is not None] + + # Make sure the recovery stub is at least 16 bytes long (for the + # case where we overwrite the recovery stub with a 64-bit absolute + # jump) + while mc.tell() - addr < 16: + mc.writechr(0x00) return addr DESCR_REF = 0x00 DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - CODE_FROMSTACK = 4*8 + # XXX: 4*8 works on i386, should we optimize for that case? 
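
A quick sanity check (illustrative only, not part of the patch) of the padding rule in generate_quick_failure() above: the recovery stub is padded to at least 16 bytes so that patch_jump_for_descr() can later overwrite it in place with "MOV r11, addr; JMP r11" when the new jump target no longer fits in a rel32.

    MOV_R11_IMM64 = 1 + 1 + 8   # REX.W prefix, opcode B8+r, 8-byte immediate
    JMP_R11 = 1 + 1 + 1         # REX.B prefix, opcode FF, ModRM byte
    PATCH_SIZE = MOV_R11_IMM64 + JMP_R11
    MIN_STUB_SIZE = 16          # enforced by the trailing loop that pads with 0x00

    assert PATCH_SIZE == 13     # matches the "13 bytes" comment in the patch
    assert PATCH_SIZE <= MIN_STUB_SIZE
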
+ CODE_FROMSTACK = 4*16 CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL - def write_failure_recovery_description(self, mc, failargs, locs): + def failure_recovery_description(self, failargs, locs): + desc_bytes = [] for i in range(len(failargs)): arg = failargs[i] if arg is not None: @@ -1070,24 +1349,30 @@ else: raise AssertionError("bogus kind") loc = locs[i] - if isinstance(loc, MODRM): + if isinstance(loc, StackLoc): n = self.CODE_FROMSTACK//4 + loc.position else: - assert isinstance(loc, REG) - n = loc.op + assert isinstance(loc, RegLoc) + n = loc.value n = kind + 4*n while n > 0x7F: - mc.writechr((n & 0x7F) | 0x80) + desc_bytes.append(chr((n & 0x7F) | 0x80)) n >>= 7 else: n = self.CODE_HOLE - mc.writechr(n) - mc.writechr(self.CODE_STOP) + desc_bytes.append(chr(n)) + desc_bytes.append(chr(self.CODE_STOP)) # assert that the fail_boxes lists are big enough assert len(failargs) <= self.fail_boxes_int.SIZE + return desc_bytes + + def write_failure_recovery_description(self, mc, failargs, locs): + for byte in self.failure_recovery_description(failargs, locs): + mc.writechr(ord(byte)) def rebuild_faillocs_from_descr(self, bytecode): from pypy.jit.backend.x86.regalloc import X86FrameManager + descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] while 1: @@ -1112,7 +1397,7 @@ size = 2 else: size = 1 - loc = X86FrameManager.frame_pos(code, size) + loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: @@ -1122,16 +1407,16 @@ kind = code & 3 code >>= 2 if kind == self.DESCR_FLOAT: - loc = xmm_registers[code] + loc = regloc.XMMREGLOCS[code] else: - loc = registers[code] + loc = regloc.REGLOCS[code] arglocs.append(loc) return arglocs[:] @rgc.no_collect def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! - self.fail_ebp = allregisters[16 + ebp.op] + self.fail_ebp = allregisters[16 + ebp.value] num = 0 value_hi = 0 while 1: @@ -1154,7 +1439,7 @@ code = (code - self.CODE_FROMSTACK) >> 2 stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] - if kind == self.DESCR_FLOAT: + if kind == self.DESCR_FLOAT and WORD == 4: value_hi = value value = rffi.cast(rffi.LONGP, stackloc - 4)[0] else: @@ -1168,8 +1453,11 @@ break code >>= 2 if kind == self.DESCR_FLOAT: - value = allregisters[2*code] - value_hi = allregisters[2*code + 1] + if WORD == 4: + value = allregisters[2*code] + value_hi = allregisters[2*code + 1] + else: + value = allregisters[code] else: value = allregisters[16 + code] @@ -1180,7 +1468,8 @@ tgt = self.fail_boxes_ptr.get_addr_for_num(num) elif kind == self.DESCR_FLOAT: tgt = self.fail_boxes_float.get_addr_for_num(num) - rffi.cast(rffi.LONGP, tgt)[1] = value_hi + if WORD == 4: + rffi.cast(rffi.LONGP, tgt)[1] = value_hi else: assert 0, "bogus kind" rffi.cast(rffi.LONGP, tgt)[0] = value @@ -1189,7 +1478,8 @@ if not we_are_translated(): assert bytecode[4] == 0xCC self.fail_boxes_count = num - fail_index = rffi.cast(rffi.LONGP, bytecode)[0] + fail_index = rffi.cast(rffi.INTP, bytecode)[0] + fail_index = rffi.cast(lltype.Signed, fail_index) return fail_index def setup_failure_recovery(self): @@ -1200,8 +1490,8 @@ # original value of the registers, optionally the original # value of XMM registers, and finally a reference to the # recovery bytecode. See _build_failure_recovery() for details. 
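
A standalone model (illustrative only) of the byte encoding produced by failure_recovery_description() above: each live fail argument is packed as kind + 4*n, where kind is one of DESCR_REF/INT/FLOAT and n is either a register number or CODE_FROMSTACK//4 plus the stack position, and the result is written low 7 bits first, with the high bit set on every byte except the last.

    def encode_entry(kind, n):
        value = kind + 4 * n
        out = []
        while value > 0x7F:
            out.append((value & 0x7F) | 0x80)
            value >>= 7
        out.append(value)
        return out              # a list of byte values, low 7 bits first

    def decode_entry(data, pos=0):
        value, shift = 0, 0
        while True:
            byte = data[pos]
            pos += 1
            value |= (byte & 0x7F) << shift
            if not byte & 0x80:
                return value & 3, value >> 2, pos   # kind, n, next position
            shift += 7

    assert decode_entry(encode_entry(1, 300))[:2] == (1, 300)
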
- stack_at_ebp = registers[ebp.op] - bytecode = rffi.cast(rffi.UCHARP, registers[8]) + stack_at_ebp = registers[ebp.value] + bytecode = rffi.cast(rffi.UCHARP, registers[self.cpu.NUM_REGS]) allregisters = rffi.ptradd(registers, -16) return self.grab_frame_values(bytecode, stack_at_ebp, allregisters) @@ -1216,23 +1506,23 @@ self.failure_recovery_func) failure_recovery_func = rffi.cast(lltype.Signed, failure_recovery_func) - mc = self.mc2._mc + mc = self.mc._mc # Assume that we are called at the beginning, when there is no risk # that 'mc' runs out of space. Checked by asserts in mc.write(). recovery_addr = mc.tell() - mc.PUSH(edi) - mc.PUSH(esi) - mc.PUSH(ebp) - mc.PUSH(esp) # <-- not really used, but needed to take up the space - mc.PUSH(ebx) - mc.PUSH(edx) - mc.PUSH(ecx) - mc.PUSH(eax) - mc.MOV(esi, esp) + + # Push all general purpose registers + for gpr in range(self.cpu.NUM_REGS-1, -1, -1): + mc.PUSH_r(gpr) + + # ebx/rbx is callee-save in both i386 and x86-64 + mc.MOV_rr(ebx.value, esp.value) + if withfloats: - mc.SUB(esp, imm(8*8)) - for i in range(8): - mc.MOVSD(mem64(esp, 8*i), xmm_registers[i]) + # Push all float registers + mc.SUB_ri(esp.value, self.cpu.NUM_REGS*8) + for i in range(self.cpu.NUM_REGS): + mc.MOVSD_sx(8*i, i) # we call a provided function that will # - call our on_leave_jitted_hook which will mark @@ -1240,7 +1530,7 @@ # avoid unwarranted freeing # - optionally save exception depending on the flag addr = self.cpu.get_on_leave_jitted_int(save_exception=exc) - mc.CALL(rel32(addr)) + mc.CALL(imm(addr)) # the following call saves all values from the stack and from # registers to the right 'fail_boxes_' location. @@ -1250,50 +1540,58 @@ # bytecode, pushed just before by the CALL instruction written by # generate_quick_failure(). XXX misaligned stack in the call, but # it's ok because failure_recovery_func is not calling anything more - mc.PUSH(esi) - mc.CALL(rel32(failure_recovery_func)) + + # XXX + if IS_X86_32: + mc.PUSH_r(ebx.value) + elif IS_X86_64: + mc.MOV_rr(edi.value, ebx.value) + # XXX: Correct to only align the stack on 64-bit? + mc.AND_ri(esp.value, -16) + else: + raise AssertionError("Shouldn't happen") + + mc.CALL(imm(failure_recovery_func)) # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA below throws away most - # of the frame, including all the PUSHes that we did just above. - mc.LEA(esp, addr_add(ebp, imm(-3 * WORD))) - mc.POP(edi) # [ebp-12] - mc.POP(esi) # [ebp-8] - mc.POP(ebx) # [ebp-4] - mc.POP(ebp) # [ebp] - mc.RET() - self.mc2.done() + # _assemble_bootstrap_code(). The LEA in _call_footer below throws + # away most of the frame, including all the PUSHes that we did just + # above. 
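
A small sketch (assumed 32-bit register list, illustration only) of the frame symmetry the code above relies on: _build_failure_recovery() ends by falling through to _call_footer(), which must undo exactly what _call_header() set up, popping the callee-save registers in the reverse of the order in which they were pushed.

    CALLEE_SAVE_REGISTERS = ["ebx", "esi", "edi"]   # x86-64 uses a longer list
    WORD = 4

    def call_header(emit):
        emit("PUSH ebp")
        emit("MOV ebp, esp")
        for reg in CALLEE_SAVE_REGISTERS:
            emit("PUSH %s" % reg)

    def call_footer(emit):
        # point esp back at the callee-save area laid out by the header
        emit("LEA esp, [ebp - %d]" % (len(CALLEE_SAVE_REGISTERS) * WORD))
        for reg in reversed(CALLEE_SAVE_REGISTERS):
            emit("POP %s" % reg)
        emit("POP ebp")
        emit("RET")

    trace = []
    call_header(trace.append)
    call_footer(trace.append)
    assert trace[2:5] == ["PUSH ebx", "PUSH esi", "PUSH edi"]
    assert trace[6:9] == ["POP edi", "POP esi", "POP ebx"]
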
+ + self._call_footer() + self.mc.done() self.failure_recovery_code[exc + 2 * withfloats] = recovery_addr def generate_failure(self, fail_index, locs, exc, locs_are_ref): - mc = self.mc + self.mc._mc.begin_reuse_scratch_register() for i in range(len(locs)): loc = locs[i] - if isinstance(loc, REG): - if loc.width == 8: + if isinstance(loc, RegLoc): + if loc.is_xmm: adr = self.fail_boxes_float.get_addr_for_num(i) - mc.MOVSD(heap64(adr), loc) + self.mc.MOVSD(heap(adr), loc) else: if locs_are_ref[i]: adr = self.fail_boxes_ptr.get_addr_for_num(i) else: adr = self.fail_boxes_int.get_addr_for_num(i) - mc.MOV(heap(adr), loc) + self.mc.MOV(heap(adr), loc) for i in range(len(locs)): loc = locs[i] - if not isinstance(loc, REG): - if loc.width == 8: - mc.MOVSD(xmm0, loc) + if not isinstance(loc, RegLoc): + if isinstance(loc, StackLoc) and loc.type == FLOAT: + self.mc.MOVSD_xb(xmm0.value, loc.value) adr = self.fail_boxes_float.get_addr_for_num(i) - mc.MOVSD(heap64(adr), xmm0) + self.mc.MOVSD(heap(adr), xmm0) else: if locs_are_ref[i]: adr = self.fail_boxes_ptr.get_addr_for_num(i) else: adr = self.fail_boxes_int.get_addr_for_num(i) - mc.MOV(eax, loc) - mc.MOV(heap(adr), eax) + self.mc.MOV(eax, loc) + self.mc.MOV(heap(adr), eax) + self.mc._mc.end_reuse_scratch_register() # we call a provided function that will # - call our on_leave_jitted_hook which will mark @@ -1301,28 +1599,31 @@ # avoid unwarranted freeing # - optionally save exception depending on the flag addr = self.cpu.get_on_leave_jitted_int(save_exception=exc) - mc.CALL(rel32(addr)) + self.mc.CALL(imm(addr)) + + self.mc.MOV_ri(eax.value, fail_index) + + # exit function + self._call_footer() - mc.LEA(esp, addr_add(ebp, imm(-3 * WORD))) - mc.MOV(eax, imm(fail_index)) - mc.POP(edi) # [ebp-12] - mc.POP(esi) # [ebp-8] - mc.POP(ebx) # [ebp-4] - mc.POP(ebp) # [ebp] - mc.RET() - - @specialize.arg(2) - def implement_guard(self, addr, emit_jump): - emit_jump(rel32(addr)) + def implement_guard(self, guard_token, condition=None): + self.mc.reserve_bytes(guard_token.recovery_stub_size()) + self.pending_guard_tokens.append(guard_token) + # XXX: These jumps are patched later, the self.mc.tell() are just + # dummy values + if condition: + self.mc.J_il(rx86.Conditions[condition], self.mc.tell()) + else: + self.mc.JMP_l(self.mc.tell()) return self.mc.tell() - 4 def genop_call(self, op, arglocs, resloc): sizeloc = arglocs[0] - assert isinstance(sizeloc, IMM32) + assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value if isinstance(op.args[0], Const): - x = rel32(op.args[0].getint()) + x = imm(op.args[0].getint()) else: x = arglocs[1] if x is eax: @@ -1332,35 +1633,35 @@ self._emit_call(x, arglocs, 2, tmp=tmp) - if isinstance(resloc, MODRM64): - self.mc.FSTP(resloc) + if isinstance(resloc, StackLoc) and resloc.width == 8 and IS_X86_32: + self.mc.FSTP_b(resloc.value) elif size == 1: - self.mc.AND(eax, imm(0xff)) + self.mc.AND_ri(eax.value, 0xff) elif size == 2: - self.mc.AND(eax, imm(0xffff)) + self.mc.AND_ri(eax.value, 0xffff) - def genop_guard_call_may_force(self, op, guard_op, addr, + def genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): faildescr = guard_op.descr fail_index = self.cpu.get_fail_descr_number(faildescr) - self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index)) + self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) self.genop_call(op, arglocs, result_loc) - self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0)) - return self.implement_guard(addr, self.mc.JL) + self.mc.CMP_bi(FORCE_INDEX_OFS, 0) + return 
self.implement_guard(guard_token, 'L') - def genop_guard_call_assembler(self, op, guard_op, addr, + def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): faildescr = guard_op.descr fail_index = self.cpu.get_fail_descr_number(faildescr) - self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index)) + self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.descr assert isinstance(descr, LoopToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(rel32(descr._x86_direct_bootstrap_code), arglocs, 2, + self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2, tmp=eax) - mc = self._start_block() + self.mc.ensure_bytes_available(256) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1376,26 +1677,27 @@ value = self.cpu.done_with_this_frame_float_v else: raise AssertionError(kind) - mc.CMP(eax, imm(value)) - mc.JE(rel8_patched_later) # goto B if we get 'done_with_this_frame' - je_location = mc.get_relative_pos() + self.mc.CMP_ri(eax.value, value) + # patched later + self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame' + je_location = self.mc.get_relative_pos() # # Path A: use assembler_helper_adr jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(rel32(asm_helper_adr), [eax, arglocs[1]], 0, - tmp=ecx, force_mc=True, mc=mc) - if isinstance(result_loc, MODRM64): - mc.FSTP(result_loc) + self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + tmp=ecx) + if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: + self.mc.FSTP_b(result_loc.value) #else: result_loc is already either eax or None, checked below - mc.JMP(rel8_patched_later) # done - jmp_location = mc.get_relative_pos() + self.mc.JMP_l8(0) # jump to done, patched later + jmp_location = self.mc.get_relative_pos() # # Path B: fast path. 
Must load the return value, and reset the token offset = jmp_location - je_location assert 0 < offset <= 127 - mc.overwrite(je_location - 1, [chr(offset)]) + self.mc.overwrite(je_location - 1, [chr(offset)]) # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: @@ -1403,8 +1705,8 @@ fielddescr = jd.vable_token_descr assert isinstance(fielddescr, BaseFieldDescr) ofs = fielddescr.offset - mc.MOV(eax, arglocs[1]) - mc.MOV(addr_add(eax, imm(ofs)), imm(0)) + self.mc.MOV(eax, arglocs[1]) + self.mc.MOV_mi((eax.value, ofs), 0) # in the line above, TOKEN_NONE = 0 # if op.result is not None: @@ -1413,27 +1715,26 @@ if kind == FLOAT: xmmtmp = X86XMMRegisterManager.all_regs[0] adr = self.fail_boxes_float.get_addr_for_num(0) - mc.MOVSD(xmmtmp, heap64(adr)) - mc.MOVSD(result_loc, xmmtmp) + self.mc.MOVSD(xmmtmp, heap(adr)) + self.mc.MOVSD(result_loc, xmmtmp) else: assert result_loc is eax if kind == INT: adr = self.fail_boxes_int.get_addr_for_num(0) - mc.MOV(eax, heap(adr)) + self.mc.MOV(eax, heap(adr)) elif kind == REF: adr = self.fail_boxes_ptr.get_addr_for_num(0) - mc.XOR(eax, eax) - mc.XCHG(eax, heap(adr)) + self.mc.XOR_rr(eax.value, eax.value) + self.mc.XCHG(eax, heap(adr)) else: raise AssertionError(kind) # # Here we join Path A and Path B again - offset = mc.get_relative_pos() - jmp_location + offset = self.mc.get_relative_pos() - jmp_location assert 0 <= offset <= 127 - mc.overwrite(jmp_location - 1, [chr(offset)]) - self._stop_block() - self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0)) - return self.implement_guard(addr, self.mc.JL) + self.mc.overwrite(jmp_location - 1, [chr(offset)]) + self.mc.CMP_bi(FORCE_INDEX_OFS, 0) + return self.implement_guard(guard_token, 'L') def genop_discard_cond_call_gc_wb(self, op, arglocs): # use 'mc._mc' directly instead of 'mc', to avoid @@ -1443,31 +1744,51 @@ cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) loc_base = arglocs[0] - mc = self._start_block() - mc.TEST(mem8(loc_base, descr.jit_wb_if_flag_byteofs), - imm8(descr.jit_wb_if_flag_singlebyte)) - mc.JZ(rel8_patched_later) - jz_location = mc.get_relative_pos() + self.mc.ensure_bytes_available(256) + self.mc.TEST8_mi((loc_base.value, descr.jit_wb_if_flag_byteofs), + descr.jit_wb_if_flag_singlebyte) + self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later + jz_location = self.mc.get_relative_pos() # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. for i in range(len(arglocs)-1, -1, -1): - mc.PUSH(arglocs[i]) + loc = arglocs[i] + if isinstance(loc, RegLoc): + self.mc.PUSH_r(loc.value) + else: + if IS_X86_64: + self.mc.MOV_ri(X86_64_SCRATCH_REG.value, loc.getint()) + self.mc.PUSH_r(X86_64_SCRATCH_REG.value) + else: + self.mc.PUSH_i32(loc.getint()) + + if IS_X86_64: + # We clobber these registers to pass the arguments, but that's + # okay, because consider_cond_call_gc_wb makes sure that any + # caller-save registers with values in them are present in arglocs, + # so they are saved on the stack above and restored below + self.mc.MOV_rs(edi.value, 0) + self.mc.MOV_rs(esi.value, 8) + # misaligned stack in the call, but it's ok because the write barrier # is not going to call anything more. Also, this assumes that the # write barrier does not touch the xmm registers. 
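
The write-barrier fast path above uses the same "emit now, patch later" idiom that also appears in guard_nonnull_class and malloc_cond_fixedsize: a conditional jump is emitted with a dummy rel8 displacement, the slow path follows, and the single displacement byte is patched once the slow path's size is known (it must fit in 127 bytes). A minimal standalone model of that idiom, for illustration only:

    class TinyAssembler(object):
        def __init__(self):
            self.code = []                     # one byte value per entry

        def get_relative_pos(self):
            return len(self.code)

        def emit_jcond_rel8(self, opcode):
            self.code.extend([opcode, 0x00])   # displacement patched later
            return self.get_relative_pos()     # position just after the jump

        def overwrite(self, pos, byte):
            self.code[pos] = byte

    mc = TinyAssembler()
    jz_location = mc.emit_jcond_rel8(0x74)     # JZ rel8, target still unknown
    mc.code.extend([0x90] * 10)                # stand-in for the slow path
    offset = mc.get_relative_pos() - jz_location
    assert 0 < offset <= 127
    mc.overwrite(jz_location - 1, offset)      # fix up the displacement byte
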
- mc.CALL(rel32(descr.get_write_barrier_fn(self.cpu))) + self.mc.CALL(imm(descr.get_write_barrier_fn(self.cpu))) for i in range(len(arglocs)): loc = arglocs[i] - assert isinstance(loc, REG) - mc.POP(loc) + if isinstance(loc, RegLoc): + self.mc.POP_r(loc.value) + else: + self.mc.ADD_ri(esp.value, WORD) # ignore the pushed constant # patch the JZ above - offset = mc.get_relative_pos() - jz_location + offset = self.mc.get_relative_pos() - jz_location assert 0 < offset <= 127 - mc.overwrite(jz_location-1, [chr(offset)]) - self._stop_block() + self.mc.overwrite(jz_location-1, [chr(offset)]) def genop_force_token(self, op, arglocs, resloc): - self.mc.LEA(resloc, mem(ebp, FORCE_INDEX_OFS)) + # RegAlloc.consider_force_token ensures this: + assert isinstance(resloc, RegLoc) + self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS) def not_implemented_op_discard(self, op, arglocs): msg = "not implemented operation: %s" % op.getopname() @@ -1495,17 +1816,16 @@ return loop_token._x86_arglocs def closing_jump(self, loop_token): - self.mc.JMP(rel32(loop_token._x86_loop_code)) + self.mc.JMP(imm(loop_token._x86_loop_code)) def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, size, tid): - # don't use self.mc - mc = self._start_block() - mc.MOV(eax, heap(nursery_free_adr)) - mc.LEA(edx, addr_add(eax, imm(size))) - mc.CMP(edx, heap(nursery_top_adr)) - mc.JNA(rel8_patched_later) - jmp_adr = mc.get_relative_pos() + self.mc.ensure_bytes_available(256) + self.mc.MOV(eax, heap(nursery_free_adr)) + self.mc.LEA_rm(edx.value, (eax.value, size)) + self.mc.CMP(edx, heap(nursery_top_adr)) + self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later + jmp_adr = self.mc.get_relative_pos() # See comments in _build_malloc_fixedsize_slowpath for the # details of the two helper functions that we are calling below. 
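
The fast path that malloc_cond_fixedsize() above compiles boils down to bump-pointer allocation in the nursery; in pseudo-Python (illustrative only, names assumed):

    class Nursery(object):
        def __init__(self, start, top):
            self.free = start
            self.top = top

        def malloc_fixedsize(self, size, tid, slow_path):
            result = self.free               # MOV eax, [nursery_free_adr]
            new_free = result + size         # LEA edx, [eax + size]
            if new_free > self.top:          # CMP edx, [nursery_top_adr]; JNA skips this
                result, new_free = slow_path(size)
            self.write_word(result, tid)     # MOV [eax], tid
            self.free = new_free             # MOV [nursery_free_adr], edx
            return result

        def write_word(self, addr, value):
            pass                             # stands in for the actual store

    nursery = Nursery(start=1000, top=1064)
    obj = nursery.malloc_fixedsize(16, tid=0x1234, slow_path=None)
    assert obj == 1000 and nursery.free == 1016
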
@@ -1521,17 +1841,16 @@ # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - mc.CALL(rel32(slowpath_addr1)) + self.mc.CALL(imm(slowpath_addr1)) self.mark_gc_roots() slowpath_addr2 = self.malloc_fixedsize_slowpath2 - mc.CALL(rel32(slowpath_addr2)) + self.mc.CALL(imm(slowpath_addr2)) - offset = mc.get_relative_pos() - jmp_adr + offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 - mc.overwrite(jmp_adr-1, [chr(offset)]) - mc.MOV(addr_add(eax, imm(0)), imm(tid)) - mc.MOV(heap(nursery_free_adr), edx) - self._stop_block() + self.mc.overwrite(jmp_adr-1, [chr(offset)]) + self.mc.MOV_mi((eax.value, 0), tid) + self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST genop_list = [Assembler386.not_implemented_op] * rop._LAST @@ -1551,32 +1870,20 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def new_addr_add(heap, mem, memsib): - def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): - if isinstance(reg_or_imm1, IMM32): - if isinstance(reg_or_imm2, IMM32): - return heap(reg_or_imm1.value + offset + - (reg_or_imm2.value << scale)) - else: - return memsib(None, reg_or_imm2, scale, reg_or_imm1.value + offset) - else: - if isinstance(reg_or_imm2, IMM32): - return mem(reg_or_imm1, offset + (reg_or_imm2.value << scale)) - else: - return memsib(reg_or_imm1, reg_or_imm2, scale, offset) - return addr_add - -addr8_add = new_addr_add(heap8, mem8, memSIB8) -addr_add = new_addr_add(heap, mem, memSIB) -addr64_add = new_addr_add(heap64, mem64, memSIB64) - -def addr_add_const(reg_or_imm1, offset): - if isinstance(reg_or_imm1, IMM32): - return heap(reg_or_imm1.value + offset) - else: - return mem(reg_or_imm1, offset) - def round_up_to_4(size): if size < 4: return 4 return size + +# XXX: ri386 migration shims: +def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): + return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) + +def addr_add_const(reg_or_imm1, offset): + return AddressLoc(reg_or_imm1, ImmedLoc(0), 0, offset) + +def mem(loc, offset): + return AddressLoc(loc, ImmedLoc(0), 0, offset) + +def heap(addr): + return AddressLoc(ImmedLoc(addr), ImmedLoc(0), 0, 0) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py Thu Sep 9 01:00:13 2010 @@ -2,12 +2,20 @@ import os, sys from pypy.rpython.lltypesystem import lltype, rffi from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.backend.x86.ri386 import I386CodeBuilder +from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder +from pypy.jit.backend.x86.regloc import LocationCodeBuilder from pypy.rlib.rmmap import PTR, alloc, free from pypy.rlib.debug import make_sure_not_resized +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 +from pypy.rlib.objectmodel import we_are_translated +# XXX: Seems nasty to change the superclass of InMemoryCodeBuilder like this +if IS_X86_32: + codebuilder_cls = X86_32_CodeBuilder +elif IS_X86_64: + codebuilder_cls = X86_64_CodeBuilder -class InMemoryCodeBuilder(I386CodeBuilder): +class InMemoryCodeBuilder(codebuilder_cls, LocationCodeBuilder): _last_dump_start = 0 def __init__(self, start, end): @@ -31,13 +39,15 @@ def write(self, listofchars): self._pos = self.overwrite(self._pos, listofchars) - def 
writechr(self, n): - # purely for performance: don't make the one-element list [chr(n)] + def writechar(self, char): pos = self._pos assert pos + 1 <= self._size - self._data[pos] = chr(n) + self._data[pos] = char self._pos = pos + 1 + def writechr(self, n): + self.writechar(chr(n)) + def get_relative_pos(self): return self._pos @@ -50,11 +60,6 @@ self._pos = pos self._last_dump_start = pos - def execute(self, arg1, arg2): - # XXX old testing stuff - fnptr = rffi.cast(lltype.Ptr(BINARYFN), self._data) - return fnptr(arg1, arg2) - def done(self): # normally, no special action is needed here if machine_code_dumper.enabled: @@ -77,9 +82,6 @@ valgrind.discard_translations(self._data, self._size) -BINARYFN = lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed) - - class MachineCodeDumper: enabled = True log_fd = -1 @@ -107,7 +109,10 @@ return False # log the executable name from pypy.jit.backend.hlinfo import highleveljitinfo - os.write(self.log_fd, 'BACKEND i386\n') + if IS_X86_32: + os.write(self.log_fd, 'BACKEND x86\n') + elif IS_X86_64: + os.write(self.log_fd, 'BACKEND x86_64\n') if highleveljitinfo.sys_executable: os.write(self.log_fd, 'SYS_EXECUTABLE %s\n' % ( highleveljitinfo.sys_executable,)) @@ -137,6 +142,12 @@ def __init__(self, map_size): data = alloc(map_size) + if IS_X86_64 and not we_are_translated(): + # Hack to make sure that mcs are not within 32-bits of one + # another for testing purposes + from pypy.rlib.rmmap import hint + hint.pos += 0xFFFFFFFF + self._init(data, map_size) def __del__(self): Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/jump.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/jump.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/jump.py Thu Sep 9 01:00:13 2010 @@ -1,23 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.ri386 import * - -class __extend__(OPERAND): - __metaclass__ = extendabletype - def _getregkey(self): - raise AssertionError("should only happen to registers and frame " - "positions") - -class __extend__(REG): - __metaclass__ = extendabletype - def _getregkey(self): - return ~self.op - -class __extend__(MODRM): - __metaclass__ = extendabletype - def _getregkey(self): - return self.position - +from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -27,7 +10,7 @@ srccount[dst._getregkey()] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, IMM32): + if isinstance(src, ImmedLoc): continue key = src._getregkey() if key in srccount: @@ -46,7 +29,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, IMM32): + if not isinstance(src, ImmedLoc): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -80,7 +63,7 @@ assert pending_dests == 0 def _move(assembler, src, dst, tmpreg): - if isinstance(dst, MODRM) and isinstance(src, MODRM): + if dst.is_memory_reference() and src.is_memory_reference(): assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py Thu Sep 9 01:00:13 2010 @@ -5,7 
+5,7 @@ from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, LoopToken, INT, REF, FLOAT) -from pypy.jit.backend.x86.ri386 import * +from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, ll2ctypes, rffi, rstr from pypy.rlib.objectmodel import we_are_translated from pypy.rlib import rgc @@ -17,16 +17,7 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox - -WORD = 4 -FRAME_FIXED_SIZE = 5 # ebp + ebx + esi + edi + force_index = 5 words -FORCE_INDEX_OFS = -4*WORD - -width_of_type = { - INT : 1, - REF : 1, - FLOAT : 2, - } +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 class X86RegisterManager(RegisterManager): @@ -35,6 +26,12 @@ no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + esi: 2, + edi: 3, + } + def call_result_location(self, v): return eax @@ -50,12 +47,26 @@ print "convert_to_imm: got a %s" % c raise AssertionError +class X86_64_RegisterManager(X86RegisterManager): + # r11 omitted because it's used as scratch + all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + no_lower_byte_regs = [] + save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] + + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + r12: 2, + r13: 3, + r14: 4, + r15: 5, + } class FloatConstants(object): BASE_CONSTANT_SIZE = 1000 def __init__(self): self.cur_array_free = 0 + self.const_id = 0 def _get_new_array(self): n = self.BASE_CONSTANT_SIZE @@ -71,7 +82,8 @@ n = self.cur_array_free - 1 arr[n] = floatval self.cur_array_free = n - return rffi.cast(lltype.Signed, arr) + n * 8 + self.const_id += 1 + return (self.const_id, rffi.cast(lltype.Signed, arr) + n * 8) class X86XMMRegisterManager(RegisterManager): @@ -80,7 +92,6 @@ all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7] # we never need lower byte I hope save_around_call_regs = all_regs - reg_width = 2 def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager=frame_manager, @@ -93,28 +104,36 @@ self.float_constants = assembler._float_constants def convert_to_imm(self, c): - adr = self.float_constants.record_float(c.getfloat()) - return heap64(adr) + const_id, adr = self.float_constants.record_float(c.getfloat()) + return ConstFloatLoc(adr, const_id) def after_call(self, v): # the result is stored in st0, but we don't have this around, # so genop_call will move it to some frame location immediately # after the call - return self.frame_manager.loc(v, 2) + return self.frame_manager.loc(v) -class X86FrameManager(FrameManager): +class X86_64_XMMRegisterManager(X86XMMRegisterManager): + # xmm15 reserved for scratch use + all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14] + save_around_call_regs = all_regs + + def call_result_location(self, v): + return xmm0 + + def after_call(self, v): + # We use RegisterManager's implementation, since X86XMMRegisterManager + # places the result on the stack, which we don't need to do when the + # calling convention places the result in xmm0 + return RegisterManager.after_call(self, v) +class X86FrameManager(FrameManager): @staticmethod - def frame_pos(i, size): - if size == 1: - res = mem(ebp, get_ebp_ofs(i)) - elif size == 2: - res = mem64(ebp, get_ebp_ofs(i + 1)) - else: - print "Unimplemented 
size %d" % i - raise NotImplementedError("unimplemented size %d" % i) - res.position = i - return res + def frame_pos(i, box_type): + if IS_X86_32 and box_type == FLOAT: + return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + else: + return StackLoc(i, get_ebp_ofs(i), 1, box_type) class RegAlloc(object): exc = False @@ -135,11 +154,21 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - self.rm = X86RegisterManager(longevity, - frame_manager = self.fm, - assembler = self.assembler) - self.xrm = X86XMMRegisterManager(longevity, frame_manager = self.fm, - assembler = self.assembler) + # XXX + if cpu.WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager + elif cpu.WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager + else: + raise AssertionError("Word size should be 4 or 8") + + self.rm = gpr_reg_mgr_cls(longevity, + frame_manager = self.fm, + assembler = self.assembler) + self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + assembler = self.assembler) def prepare_loop(self, inputargs, operations, looptoken): self._prepare(inputargs, operations) @@ -184,7 +213,7 @@ if reg: loc = reg else: - loc = self.fm.loc(arg, width_of_type[arg.type]) + loc = self.fm.loc(arg) if arg.type == FLOAT: floatlocs[i] = loc else: @@ -252,23 +281,23 @@ arg = inputargs[i] i += 1 if arg.type == FLOAT: - if isinstance(loc, REG): + if isinstance(loc, RegLoc): self.xrm.reg_bindings[arg] = loc used[loc] = None else: self.fm.frame_bindings[arg] = loc else: - if isinstance(loc, REG): + if isinstance(loc, RegLoc): self.rm.reg_bindings[arg] = loc used[loc] = None else: self.fm.frame_bindings[arg] = loc self.rm.free_regs = [] - for reg in X86RegisterManager.all_regs: + for reg in self.rm.all_regs: if reg not in used: self.rm.free_regs.append(reg) self.xrm.free_regs = [] - for reg in X86XMMRegisterManager.all_regs: + for reg in self.xrm.all_regs: if reg not in used: self.xrm.free_regs.append(reg) # note: we need to make a copy of inputargs because possibly_free_vars @@ -646,7 +675,7 @@ vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.args[vable_index]) - vable = self.fm.loc(op.args[vable_index], 1) + vable = self.fm.loc(op.args[vable_index]) else: vable = imm(0) self._call(op, [imm(size), vable] + @@ -655,13 +684,12 @@ def consider_cond_call_gc_wb(self, op): assert op.result is None + loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args) + # ^^^ we force loc_newvalue in a reg (unless it's a Const), + # because it will be needed anyway by the following setfield_gc. + # It avoids loading it twice from the memory. loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, imm_fine=False) - loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args, - imm_fine=False) - # ^^^ we also force loc_newvalue in a reg, because it will be needed - # anyway by the following setfield_gc. It avoids loading it twice - # from the memory. arglocs = [loc_base, loc_newvalue] # add eax, ecx and edx as extra "arguments" to ensure they are # saved and restored. Fish in self.rm to know which of these @@ -670,7 +698,7 @@ # function, a GC write barrier, is known not to touch them. # See remember_young_pointer() in rpython/memory/gc/generation.py. 
for v, reg in self.rm.reg_bindings.items(): - if ((reg is eax or reg is ecx or reg is edx) + if (reg in self.rm.save_around_call_regs and self.rm.stays_alive(v)): arglocs.append(reg) self.PerformDiscard(op, arglocs) @@ -679,23 +707,18 @@ def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) gc_ll_descr = self.assembler.cpu.gc_ll_descr - tmp0 = TempBox() self.rm.force_allocate_reg(op.result, selected_reg=eax) - self.rm.force_allocate_reg(tmp0, selected_reg=edx) - # XXX about the next 10 lines: why not just say - # force_allocate_reg(tmp1, selected_reg=ecx)????? - for v, reg in self.rm.reg_bindings.items(): - if reg is ecx: - to_sync = v - break - else: - to_sync = None - if to_sync is not None: - self.rm._sync_var(to_sync) - del self.rm.reg_bindings[to_sync] - self.rm.free_regs.append(ecx) - # we need to do it here, so edx is not in reg_bindings - self.rm.possibly_free_var(tmp0) + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. + for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + self.assembler.malloc_cond_fixedsize( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), @@ -809,7 +832,7 @@ def consider_setfield_gc(self, op): ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr) - assert isinstance(size_loc, IMM32) + assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True else: @@ -947,24 +970,17 @@ pass def get_mark_gc_roots(self, gcrootmap): - shape = gcrootmap.get_basic_shape() + shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert isinstance(val, MODRM) + assert isinstance(val, StackLoc) gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - if reg is ebx: - gcrootmap.add_ebx(shape) - elif reg is esi: - gcrootmap.add_esi(shape) - elif reg is edi: - gcrootmap.add_edi(shape) - else: - print "[get_mark_gc_roots] bogus register", reg - assert False + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape) def consider_force_token(self, op): Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py Thu Sep 9 01:00:13 2010 @@ -4,11 +4,13 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 -from pypy.jit.backend.x86.regalloc import FORCE_INDEX_OFS +from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS from pypy.jit.backend.x86.profagent import ProfileAgent from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU +from pypy.jit.backend.x86 import regloc +import sys -class CPU386(AbstractLLCPU): +class AbstractX86CPU(AbstractLLCPU): debug = 
True supports_floats = True @@ -44,6 +46,7 @@ self.profile_agent.startup() def finish_once(self): + self.assembler.finish_once() self.profile_agent.shutdown() def compile_loop(self, inputargs, operations, looptoken): @@ -131,10 +134,28 @@ assert fail_index == fail_index_2 return faildescr +class CPU386(AbstractX86CPU): + WORD = 4 + NUM_REGS = 8 + CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] + FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 + + def __init__(self, *args, **kwargs): + assert sys.maxint == (2**31 - 1) + super(CPU386, self).__init__(*args, **kwargs) class CPU386_NO_SSE2(CPU386): supports_floats = False +class CPU_X86_64(AbstractX86CPU): + WORD = 8 + NUM_REGS = 16 + CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] + FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 + + def __init__(self, *args, **kwargs): + assert sys.maxint == (2**63 - 1) + super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py Thu Sep 9 01:00:13 2010 @@ -3,6 +3,5 @@ cpu = detect_cpu.autodetect() def pytest_runtest_setup(item): - if cpu != 'x86': - py.test.skip("x86 directory skipped: cpu is %r" % (cpu,)) - + if cpu not in ('x86', 'x86_64'): + py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_assembler.py Thu Sep 9 01:00:13 2010 @@ -1,14 +1,22 @@ -from pypy.jit.backend.x86.ri386 import * +from pypy.jit.backend.x86.regloc import * from pypy.jit.backend.x86.assembler import Assembler386, MachineCodeBlockWrapper from pypy.jit.backend.x86.regalloc import X86FrameManager, get_ebp_ofs -from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat +from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, INT, REF, FLOAT from pypy.rlib.rarithmetic import intmask from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 +from pypy.jit.backend.detect_cpu import getcpuclass +from pypy.jit.backend.x86.regalloc import X86RegisterManager, X86_64_RegisterManager, X86XMMRegisterManager, X86_64_XMMRegisterManager +ACTUAL_CPU = getcpuclass() class FakeCPU: rtyper = None supports_floats = True + NUM_REGS = ACTUAL_CPU.NUM_REGS + + def fielddescrof(self, STRUCT, name): + return 42 class FakeMC: def __init__(self, base_address=0): @@ -25,7 +33,14 @@ self.content.append(("JMP", args)) def done(self): pass + def PUSH_r(self, reg): + pass + def POP_r(self, reg): + pass +class FakeAssembler: + def write_pending_failure_recoveries(self): + pass def test_write_failure_recovery_description(): assembler = Assembler386(FakeCPU()) @@ -33,12 +48,12 @@ failargs = [BoxInt(), BoxPtr(), BoxFloat()] * 3 failargs.insert(6, None) failargs.insert(7, None) - locs = [X86FrameManager.frame_pos(0, 1), - X86FrameManager.frame_pos(1, 1), - X86FrameManager.frame_pos(10, 2), - X86FrameManager.frame_pos(100, 1), - X86FrameManager.frame_pos(101, 1), - X86FrameManager.frame_pos(110, 2), + locs = 
[X86FrameManager.frame_pos(0, INT), + X86FrameManager.frame_pos(1, REF), + X86FrameManager.frame_pos(10, FLOAT), + X86FrameManager.frame_pos(100, INT), + X86FrameManager.frame_pos(101, REF), + X86FrameManager.frame_pos(110, FLOAT), None, None, ebx, @@ -46,17 +61,17 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(8+0), - Assembler386.DESCR_REF + 4*(8+1), - Assembler386.DESCR_FLOAT + 4*(8+10), - Assembler386.DESCR_INT + 4*(8+100), - Assembler386.DESCR_REF + 4*(8+101), - Assembler386.DESCR_FLOAT + 4*(8+110), + nums = [Assembler386.DESCR_INT + 4*(16+0), + Assembler386.DESCR_REF + 4*(16+1), + Assembler386.DESCR_FLOAT + 4*(16+10), + Assembler386.DESCR_INT + 4*(16+100), + Assembler386.DESCR_REF + 4*(16+101), + Assembler386.DESCR_FLOAT + 4*(16+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, - Assembler386.DESCR_INT + 4*ebx.op, - Assembler386.DESCR_REF + 4*esi.op, - Assembler386.DESCR_FLOAT + 4*xmm2.op] + Assembler386.DESCR_INT + 4*ebx.value, + Assembler386.DESCR_REF + 4*esi.value, + Assembler386.DESCR_FLOAT + 4*xmm2.value] double_byte_nums = [] for num in nums[3:6]: double_byte_nums.append((num & 0x7F) | 0x80) @@ -94,6 +109,9 @@ return lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) def get_random_float(): + # Returns , , + # NB: on 64-bit, will be the entire float and + # will be random garbage from malloc! assert withfloats value = random.random() - 0.5 # make sure it fits into 64 bits @@ -101,9 +119,16 @@ rffi.cast(rffi.DOUBLEP, tmp)[0] = value return rffi.cast(rffi.DOUBLEP, tmp)[0], tmp[0], tmp[1] + if IS_X86_32: + main_registers = X86RegisterManager.all_regs + xmm_registers = X86XMMRegisterManager.all_regs + elif IS_X86_64: + main_registers = X86_64_RegisterManager.all_regs + xmm_registers = X86_64_XMMRegisterManager.all_regs + # memory locations: 26 integers, 26 pointers, 26 floats # main registers: half of them as signed and the other half as ptrs - # xmm registers: all floats, from xmm0 to xmm7 + # xmm registers: all floats, from xmm0 to xmm(7|15) # holes: 8 locations = [] baseloc = 4 @@ -117,18 +142,17 @@ content = ([('int', locations.pop()) for _ in range(26)] + [('ptr', locations.pop()) for _ in range(26)] + [(['int', 'ptr'][random.randrange(0, 2)], reg) - for reg in [eax, ecx, edx, ebx, esi, edi]]) + for reg in main_registers]) if withfloats: content += ([('float', locations.pop()) for _ in range(26)] + - [('float', reg) for reg in [xmm0, xmm1, xmm2, xmm3, - xmm4, xmm5, xmm6, xmm7]]) + [('float', reg) for reg in xmm_registers]) for i in range(8): content.append(('hole', None)) random.shuffle(content) # prepare the expected target arrays, the descr_bytecode, # the 'registers' and the 'stack' arrays according to 'content' - xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+9, flavor='raw') + xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw') registers = rffi.ptradd(xmmregisters, 16) stacklen = baseloc + 10 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw') @@ -140,8 +164,8 @@ assert loc >= 0 ofs = get_ebp_ofs(loc) assert ofs < 0 - assert (ofs % 4) == 0 - stack[stacklen + ofs//4] = value + assert (ofs % WORD) == 0 + stack[stacklen + ofs//WORD] = value descr_bytecode = [] for i, (kind, loc) in enumerate(content): @@ -152,12 +176,18 @@ value, lo, hi = get_random_float() expected_floats[i] = value kind = Assembler386.DESCR_FLOAT - if isinstance(loc, REG): - xmmregisters[2*loc.op] = lo - xmmregisters[2*loc.op+1] = hi + if isinstance(loc, 
RegLoc): + if WORD == 4: + xmmregisters[2*loc.value] = lo + xmmregisters[2*loc.value+1] = hi + elif WORD == 8: + xmmregisters[loc.value] = lo else: - write_in_stack(loc, hi) - write_in_stack(loc+1, lo) + if WORD == 4: + write_in_stack(loc, hi) + write_in_stack(loc+1, lo) + elif WORD == 8: + write_in_stack(loc, lo) else: if kind == 'int': value = get_random_int() @@ -170,15 +200,15 @@ value = rffi.cast(rffi.LONG, value) else: assert 0, kind - if isinstance(loc, REG): - registers[loc.op] = value + if isinstance(loc, RegLoc): + registers[loc.value] = value else: write_in_stack(loc, value) - if isinstance(loc, REG): - num = kind + 4*loc.op + if isinstance(loc, RegLoc): + num = kind + 4*loc.value else: - num = kind + 4*(8+loc) + num = kind + Assembler386.CODE_FROMSTACK + (4*loc) while num >= 0x80: descr_bytecode.append((num & 0x7F) | 0x80) num >>= 7 @@ -195,8 +225,8 @@ for i in range(len(descr_bytecode)): assert 0 <= descr_bytecode[i] <= 255 descr_bytes[i] = rffi.cast(rffi.UCHAR, descr_bytecode[i]) - registers[8] = rffi.cast(rffi.LONG, descr_bytes) - registers[ebp.op] = rffi.cast(rffi.LONG, stack) + 4*stacklen + registers[ACTUAL_CPU.NUM_REGS] = rffi.cast(rffi.LONG, descr_bytes) + registers[ebp.value] = rffi.cast(rffi.LONG, stack) + WORD*stacklen # run! assembler = Assembler386(FakeCPU()) @@ -237,7 +267,8 @@ def test_mc_wrapper_profile_agent(): agent = FakeProfileAgent() - mc = FakeMCWrapper(100, agent) + assembler = FakeAssembler() + mc = FakeMCWrapper(assembler, 100, agent) mc.start_function("abc") mc.writechr("x") mc.writechr("x") Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_basic.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_basic.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_basic.py Thu Sep 9 01:00:13 2010 @@ -1,5 +1,5 @@ import py -from pypy.jit.backend.x86.runner import CPU386 +from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.jit.metainterp.test import test_basic from pypy.jit.codewriter.policy import StopAtXPolicy @@ -7,7 +7,7 @@ class Jit386Mixin(test_basic.LLJitMixin): type_system = 'lltype' - CPUClass = CPU386 + CPUClass = getcpuclass() def check_jumps(self, maxcount): pass Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_gc_integration.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_gc_integration.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_gc_integration.py Thu Sep 9 01:00:13 2010 @@ -9,13 +9,13 @@ from pypy.jit.codewriter import heaptracker from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.llsupport.gc import GcLLDescription -from pypy.jit.backend.x86.runner import CPU -from pypy.jit.backend.x86.regalloc import RegAlloc, WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.detect_cpu import getcpuclass +from pypy.jit.backend.x86.regalloc import RegAlloc +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.x86.ri386 import * from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcRefList, GcPtrFieldDescr from pypy.jit.backend.x86.test.test_regalloc import MockAssembler 
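For readers following test_write_failure_recovery_description and the randomized failure test above: each failarg location is packed into a single integer (a DESCR_INT/DESCR_REF/DESCR_FLOAT tag plus 4 times the register number, or 4 times the stack slot offset by CODE_FROMSTACK) and written out 7 bits per byte, mirroring the encoding loop in the test. A minimal standalone sketch of that byte encoding and the matching decoder; the tag values here are placeholders, not the real Assembler386 constants:

    DESCR_INT, DESCR_REF, DESCR_FLOAT = 0, 1, 2      # placeholder tag values

    def encode_num(num):
        # low 7 bits first; the high bit of a byte means "more bytes follow"
        out = []
        while num >= 0x80:
            out.append((num & 0x7F) | 0x80)
            num >>= 7
        out.append(num)
        return out

    def decode_num(data, pos=0):
        num, shift = 0, 0
        while True:
            byte = data[pos]
            pos += 1
            num |= (byte & 0x7F) << shift
            if not (byte & 0x80):
                return num, pos
            shift += 7

    # a REF in a high frame slot needs two bytes, like the "double_byte_nums" in the test
    num = DESCR_REF + 4 * (16 + 110)
    assert encode_num(num) == [(num & 0x7F) | 0x80, num >> 7]
    assert decode_num(encode_num(num))[0] == num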
@@ -23,17 +23,16 @@ from pypy.jit.backend.x86.regalloc import X86RegisterManager, X86FrameManager,\ X86XMMRegisterManager +CPU = getcpuclass() + class MockGcRootMap(object): - def get_basic_shape(self): + def get_basic_shape(self, is_64_bit): return ['shape'] def add_ebp_offset(self, shape, offset): shape.append(offset) - def add_ebx(self, shape): - shape.append('ebx') - def add_esi(self, shape): - shape.append('esi') - def add_edi(self, shape): - shape.append('edi') + def add_callee_save_reg(self, shape, reg_index): + index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } + shape.append(index_to_name[reg_index]) def compress_callshape(self, shape): assert shape[0] == 'shape' return ['compressed'] + shape[1:] @@ -51,7 +50,7 @@ def initialize(self): self.gcrefs = GcRefList() self.gcrefs.initialize() - self.single_gcref_descr = GcPtrFieldDescr(0) + self.single_gcref_descr = GcPtrFieldDescr('', 0) rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func @@ -84,7 +83,7 @@ mark = regalloc.get_mark_gc_roots(cpu.gc_ll_descr.gcrootmap) assert mark[0] == 'compressed' base = -WORD * FRAME_FIXED_SIZE - expected = ['ebx', 'esi', 'edi', base, base-4, base-8] + expected = ['ebx', 'esi', 'edi', base, base-WORD, base-WORD*2] assert dict.fromkeys(mark[1:]) == dict.fromkeys(expected) class TestRegallocGcIntegration(BaseTestRegalloc): @@ -175,7 +174,7 @@ self.addrs[1] = self.addrs[0] + 64 # 64 bytes def malloc_slowpath(size): - assert size == 8 + assert size == WORD*2 nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size return nadr @@ -199,7 +198,7 @@ return rffi.cast(lltype.Signed, self.addrs) def get_nursery_top_addr(self): - return rffi.cast(lltype.Signed, self.addrs) + 4 + return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_fixedsize_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) @@ -213,7 +212,7 @@ def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = 4 + cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() NODE = lltype.Struct('node', ('tid', lltype.Signed), @@ -249,7 +248,7 @@ assert gc_ll_descr.nursery[0] == self.nodedescr.tid assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + 8 + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) def test_malloc_slowpath(self): ops = ''' @@ -269,7 +268,7 @@ # this should call slow path once gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + 8 + assert gc_ll_descr.addrs[0] == nadr + (WORD*2) def test_new_with_vtable(self): ops = ''' @@ -284,4 +283,4 @@ assert gc_ll_descr.nursery[0] == self.descrsize.tid assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + 12 + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_jump.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_jump.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_jump.py Thu Sep 9 01:00:13 2010 @@ -1,6 +1,7 @@ -from pypy.jit.backend.x86.ri386 import * +from pypy.jit.backend.x86.regloc import * from pypy.jit.backend.x86.regalloc import X86FrameManager from pypy.jit.backend.x86.jump import remap_frame_layout +from pypy.jit.metainterp.history import INT frame_pos 
= X86FrameManager.frame_pos @@ -25,7 +26,7 @@ continue assert len(op1) == len(op2) for x, y in zip(op1, op2): - if isinstance(x, MODRM) and isinstance(y, MODRM): + if isinstance(x, StackLoc) and isinstance(y, MODRM): assert x.byte == y.byte assert x.extradata == y.extradata else: @@ -41,9 +42,9 @@ remap_frame_layout(assembler, [eax, ebx, ecx, edx, esi, edi], [eax, ebx, ecx, edx, esi, edi], '?') assert assembler.ops == [] - s8 = frame_pos(1, 1) - s12 = frame_pos(31, 1) - s20 = frame_pos(6, 1) + s8 = frame_pos(1, INT) + s12 = frame_pos(31, INT) + s20 = frame_pos(6, INT) remap_frame_layout(assembler, [eax, ebx, ecx, s20, s8, edx, s12, esi, edi], [eax, ebx, ecx, s20, s8, edx, s12, esi, edi], '?') @@ -58,10 +59,10 @@ def test_simple_framelocs(): assembler = MockAssembler() - s8 = frame_pos(0, 1) - s12 = frame_pos(13, 1) - s20 = frame_pos(20, 1) - s24 = frame_pos(221, 1) + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], edx) assert assembler.ops == [('mov', s8, edx), ('mov', edx, s20), @@ -70,10 +71,10 @@ def test_reordering(): assembler = MockAssembler() - s8 = frame_pos(8, 1) - s12 = frame_pos(12, 1) - s20 = frame_pos(19, 1) - s24 = frame_pos(1, 1) + s8 = frame_pos(8, INT) + s12 = frame_pos(12, INT) + s20 = frame_pos(19, INT) + s24 = frame_pos(1, INT) remap_frame_layout(assembler, [eax, s8, s20, ebx], [s8, ebx, eax, edi], '?') assert assembler.got([('mov', ebx, edi), @@ -83,10 +84,10 @@ def test_cycle(): assembler = MockAssembler() - s8 = frame_pos(8, 1) - s12 = frame_pos(12, 1) - s20 = frame_pos(19, 1) - s24 = frame_pos(1, 1) + s8 = frame_pos(8, INT) + s12 = frame_pos(12, INT) + s20 = frame_pos(19, INT) + s24 = frame_pos(1, INT) remap_frame_layout(assembler, [eax, s8, s20, ebx], [s8, ebx, eax, s20], '?') assert assembler.got([('push', s8), @@ -97,12 +98,12 @@ def test_cycle_2(): assembler = MockAssembler() - s8 = frame_pos(8, 1) - s12 = frame_pos(12, 1) - s20 = frame_pos(19, 1) - s24 = frame_pos(1, 1) - s2 = frame_pos(2, 1) - s3 = frame_pos(3, 1) + s8 = frame_pos(8, INT) + s12 = frame_pos(12, INT) + s20 = frame_pos(19, INT) + s24 = frame_pos(1, INT) + s2 = frame_pos(2, INT) + s3 = frame_pos(3, INT) remap_frame_layout(assembler, [eax, s8, edi, s20, eax, s20, s24, esi, s2, s3], [s8, s20, edi, eax, edx, s24, ebx, s12, s3, s2], @@ -127,14 +128,14 @@ remap_frame_layout(assembler, [c3], [eax], '?') assert assembler.ops == [('mov', c3, eax)] assembler = MockAssembler() - s12 = frame_pos(12, 1) + s12 = frame_pos(12, INT) remap_frame_layout(assembler, [c3], [s12], '?') assert assembler.ops == [('mov', c3, s12)] def test_constants_and_cycle(): assembler = MockAssembler() c3 = imm(3) - s12 = frame_pos(13, 1) + s12 = frame_pos(13, INT) remap_frame_layout(assembler, [ebx, c3, s12], [s12, eax, ebx], edi) assert assembler.ops == [('mov', c3, eax), Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py Thu Sep 9 01:00:13 2010 @@ -1,6 +1,5 @@ - -from pypy.jit.backend.x86.runner import CPU from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 class TestRecompilation(BaseTestRegalloc): def test_compile_bridge_not_deeper(self): @@ -51,7 +50,9 @@ descr = 
loop.operations[2].descr new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 - assert new > previous + # XXX: Maybe add enough ops to force stack on 64-bit as well? + if IS_X86_32: + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -111,7 +112,9 @@ guard_op = loop.operations[5] loop_frame_depth = loop.token._x86_frame_depth assert loop.token._x86_param_depth == 0 - assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth + # XXX: Maybe add enough ops to force stack on 64-bit as well? + if IS_X86_32: + assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth assert guard_op.descr._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py Thu Sep 9 01:00:13 2010 @@ -7,15 +7,17 @@ BoxPtr, ConstPtr, LoopToken, BasicFailDescr from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache -from pypy.jit.backend.x86.runner import CPU -from pypy.jit.backend.x86.regalloc import RegAlloc, WORD, X86RegisterManager,\ +from pypy.jit.backend.detect_cpu import getcpuclass +from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\ FloatConstants +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.x86.ri386 import * +from pypy.jit.backend.x86.rx86 import * +CPU = getcpuclass() class MockGcDescr(GcCache): def get_funcptr_for_new(self): return 123 @@ -92,13 +94,20 @@ def f2(x, y): return x*y + def f10(*args): + assert len(args) == 10 + return sum(args) + F1PTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)) F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*2, lltype.Signed)) + F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*10, lltype.Signed)) f1ptr = llhelper(F1PTR, f1) f2ptr = llhelper(F2PTR, f2) + f10ptr = llhelper(F10PTR, f10) f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT) f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT) + f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT) namespace = locals().copy() type_system = 'lltype' @@ -541,6 +550,12 @@ assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] class TestRegAllocCallAndStackDepth(BaseTestRegalloc): + def expected_param_depth(self, num_args): + # Assumes the arguments are all non-float + if IS_X86_32: + return num_args + elif IS_X86_64: + return max(num_args - 6, 0) def test_one_call(self): ops = ''' @@ -550,7 +565,7 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == 1 + assert loop.token._x86_param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -561,8 +576,21 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == 2 - + assert 
loop.token._x86_param_depth == self.expected_param_depth(2) + + def test_call_many_arguments(self): + # NB: The first and last arguments in the call are constants. This + # is primarily for x86-64, to ensure that loading a constant to an + # argument register or to the stack works correctly + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7] + i8 = call(ConstClass(f10ptr), 1, i0, i1, i2, i3, i4, i5, i6, i7, 10, descr=f10_calldescr) + finish(i8) + ''' + loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) + assert self.getint(0) == 55 + assert loop.token._x86_param_depth == self.expected_param_depth(10) + def test_bridge_calls_1(self): ops = ''' [i0, i1] @@ -579,7 +607,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == 2 + assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) @@ -602,7 +630,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == 2 + assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc2.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc2.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc2.py Thu Sep 9 01:00:13 2010 @@ -2,7 +2,9 @@ from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ BoxPtr, ConstPtr, BasicFailDescr, LoopToken from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.x86.runner import CPU +from pypy.jit.backend.detect_cpu import getcpuclass +from pypy.jit.backend.x86.arch import WORD +CPU = getcpuclass() def test_bug_rshift(): v1 = BoxInt() @@ -281,5 +283,8 @@ assert cpu.get_latest_value_int(16) == -57344 assert cpu.get_latest_value_int(17) == 1 assert cpu.get_latest_value_int(18) == -1 - assert cpu.get_latest_value_int(19) == -2147483648 + if WORD == 4: + assert cpu.get_latest_value_int(19) == -2147483648 + elif WORD == 8: + assert cpu.get_latest_value_int(19) == 19327352832 assert cpu.get_latest_value_int(20) == -49 Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py Thu Sep 9 01:00:13 2010 @@ -1,16 +1,22 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass +from pypy.rpython.annlowlevel import llhelper from pypy.jit.metainterp.history import ResOperation, LoopToken -from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, - Box, BasicFailDescr) -from pypy.jit.backend.x86.runner import CPU -from pypy.jit.backend.x86.regalloc import WORD +from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, + ConstPtr, Box, BoxFloat, BasicFailDescr) +from pypy.jit.backend.detect_cpu import getcpuclass +from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.executor import execute from pypy.jit.backend.test.runner_test import LLtypeBackendTest +from 
pypy.jit.metainterp.test.oparser import parse +from pypy.tool.udir import udir import ctypes import sys +import os + +CPU = getcpuclass() class FakeStats(object): pass @@ -56,7 +62,7 @@ assert u.chars[3] == u'd' @staticmethod - def _resbuf(res, item_tp=ctypes.c_int): + def _resbuf(res, item_tp=ctypes.c_long): return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): @@ -71,8 +77,11 @@ return ctypes.cast(buf, ctypes.c_void_p).value func = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)(f) addr = ctypes.cast(func, ctypes.c_void_p).value + # ctypes produces an unsigned value. We need it to be signed for, eg, + # relative addressing to work properly. + addr = rffi.cast(lltype.Signed, addr) - self.cpu.assembler.make_sure_mc_exists() + self.cpu.assembler.setup() self.cpu.assembler.malloc_func_addr = addr ofs = symbolic.get_field_token(rstr.STR, 'chars', False)[0] @@ -184,6 +193,7 @@ def test_getfield_setfield(self): TP = lltype.GcStruct('x', ('s', lltype.Signed), + ('i', rffi.INT), ('f', lltype.Float), ('u', rffi.USHORT), ('c1', lltype.Char), @@ -192,6 +202,7 @@ res = self.execute_operation(rop.NEW, [], 'ref', self.cpu.sizeof(TP)) ofs_s = self.cpu.fielddescrof(TP, 's') + ofs_i = self.cpu.fielddescrof(TP, 'i') #ofs_f = self.cpu.fielddescrof(TP, 'f') ofs_u = self.cpu.fielddescrof(TP, 'u') ofsc1 = self.cpu.fielddescrof(TP, 'c1') @@ -209,6 +220,11 @@ ofs_s) s = self.execute_operation(rop.GETFIELD_GC, [res], 'int', ofs_s) assert s.value == 3 + + self.execute_operation(rop.SETFIELD_GC, [res, BoxInt(1234)], 'void', ofs_i) + i = self.execute_operation(rop.GETFIELD_GC, [res], 'int', ofs_i) + assert i.value == 1234 + #u = self.execute_operation(rop.GETFIELD_GC, [res, ofs_u], 'int') #assert u.value == 5 self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(1)], 'void', @@ -357,7 +373,9 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge) name, address, size = agent.functions[1] assert name == "Bridge # 0: bye" - assert address == loopaddress + loopsize + # Would be exactly ==, but there are some guard failure recovery + # stubs in-between + assert address >= loopaddress + loopsize assert size >= 10 # randomish number self.cpu.set_future_value_int(0, 2) @@ -366,6 +384,19 @@ res = self.cpu.get_latest_value_int(0) assert res == 20 + def test_call_with_const_floats(self): + def func(f1, f2): + return f1 + f2 + + FUNC = self.FuncType([lltype.Float, lltype.Float], lltype.Float) + FPTR = self.Ptr(FUNC) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + func_ptr = llhelper(FPTR, func) + funcbox = self.get_funcbox(self.cpu, func_ptr) + res = self.execute_operation(rop.CALL, [funcbox, ConstFloat(1.5), ConstFloat(2.5)], 'float', descr=calldescr) + assert res.value == 4.0 + + class TestX86OverflowMC(TestX86): def setup_method(self, meth): @@ -383,10 +414,100 @@ ops.append(ResOperation(rop.FINISH, [v], None, descr=BasicFailDescr())) looptoken = LoopToken() - self.cpu.assembler.make_sure_mc_exists() + self.cpu.assembler.setup() old_mc_mc = self.cpu.assembler.mc._mc self.cpu.compile_loop([base_v], ops, looptoken) assert self.cpu.assembler.mc._mc != old_mc_mc # overflowed self.cpu.set_future_value_int(0, base_v.value) self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1024 + + def test_overflow_guard_float_cmp(self): + # The float comparisons on x86 tend to use small relative jumps, + # which may run into trouble if they fall on the edge of a + # MachineCodeBlock change. 
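        # (A "small relative jump" here is a Jcc with an 8-bit displacement, which only
        # reaches about +/-127 bytes; the 200 repetitions below presumably exist to make
        # the generated code spill over a MachineCodeBlock boundary so that at least one
        # such jump lands near an edge.)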
+ a = BoxFloat(1.0) + b = BoxFloat(2.0) + failed = BoxInt(41) + finished = BoxInt(42) + + # We select guards that will always succeed, so that execution will + # continue through the entire set of comparisions + ops_to_test = ( + (rop.FLOAT_LT, [a, b], rop.GUARD_TRUE), + (rop.FLOAT_LT, [b, a], rop.GUARD_FALSE), + + (rop.FLOAT_LE, [a, a], rop.GUARD_TRUE), + (rop.FLOAT_LE, [a, b], rop.GUARD_TRUE), + (rop.FLOAT_LE, [b, a], rop.GUARD_FALSE), + + (rop.FLOAT_EQ, [a, a], rop.GUARD_TRUE), + (rop.FLOAT_EQ, [a, b], rop.GUARD_FALSE), + + (rop.FLOAT_NE, [a, b], rop.GUARD_TRUE), + (rop.FLOAT_NE, [a, a], rop.GUARD_FALSE), + + (rop.FLOAT_GT, [b, a], rop.GUARD_TRUE), + (rop.FLOAT_GT, [a, b], rop.GUARD_FALSE), + + (rop.FLOAT_GE, [a, a], rop.GUARD_TRUE), + (rop.FLOAT_GE, [b, a], rop.GUARD_TRUE), + (rop.FLOAT_GE, [a, b], rop.GUARD_FALSE), + ) + + for float_op, args, guard_op in ops_to_test: + ops = [] + + for i in range(200): + cmp_result = BoxInt() + ops.append(ResOperation(float_op, args, cmp_result)) + ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr())) + ops[-1].fail_args = [failed] + + ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr())) + + looptoken = LoopToken() + self.cpu.compile_loop([a, b, failed, finished], ops, looptoken) + self.cpu.set_future_value_float(0, a.value) + self.cpu.set_future_value_float(1, b.value) + self.cpu.set_future_value_int(2, failed.value) + self.cpu.set_future_value_int(3, finished.value) + self.cpu.execute_token(looptoken) + + # Really just a sanity check. We're actually interested in + # whether the test segfaults. + assert self.cpu.get_latest_value_int(0) == finished.value + + +class TestDebuggingAssembler(object): + def setup_method(self, meth): + self.pypylog = os.environ.get('PYPYLOG', None) + self.logfile = str(udir.join('x86_runner.log')) + os.environ['PYPYLOG'] = "mumble:" + self.logfile + self.cpu = CPU(rtyper=None, stats=FakeStats()) + + def teardown_method(self, meth): + if self.pypylog is not None: + os.environ['PYPYLOG'] = self.pypylog + + def test_debugger_on(self): + loop = """ + [i0] + debug_merge_point('xyz') + i1 = int_add(i0, 1) + i2 = int_ge(i1, 10) + guard_false(i2) [] + jump(i1) + """ + ops = parse(loop) + self.cpu.assembler.set_debug(True) + self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + self.cpu.set_future_value_int(0, 0) + self.cpu.execute_token(ops.token) + # check debugging info + name, struct = self.cpu.assembler.loop_run_counters[0] + assert name == 'xyz' + assert struct.i == 10 + self.cpu.finish_once() + lines = py.path.local(self.logfile + ".count").readlines() + assert lines[0] == '10 xyz\n' Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_symbolic_x86.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_symbolic_x86.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_symbolic_x86.py Thu Sep 9 01:00:13 2010 @@ -1,6 +1,7 @@ import py from pypy.jit.backend.llsupport.symbolic import * from pypy.rpython.lltypesystem import lltype, rffi +from pypy.jit.backend.x86.arch import WORD # This test file is here and not in llsupport/test/ because it checks # that we get correct numbers for a 32-bit machine. 
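The hunk below replaces the hard-coded 4-byte field sizes and offsets with WORD so the same assertions hold on x86-64. A quick illustration of the layout rule being asserted (a standalone sketch, not the real get_field_token/get_size implementation):

    import struct

    WORD = struct.calcsize("P")        # 4 on a 32-bit build, 8 on x86-64

    # three consecutive Signed fields x, y, z packed back to back; any GC header
    # only shifts ofs_x, the deltas between fields stay multiples of WORD
    ofs_x = 0
    ofs_y = ofs_x + WORD
    ofs_z = ofs_x + 2 * WORD
    totalsize = ofs_z + WORD
    assert totalsize - ofs_x == 3 * WORD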
@@ -19,32 +20,32 @@ ofs_z, size_z = get_field_token(S, 'z', False) # ofs_x might be 0 or not, depending on how we count the headers # but the rest should be as expected for a 386 machine - assert size_x == size_y == size_z == 4 + assert size_x == size_y == size_z == WORD assert ofs_x >= 0 - assert ofs_y == ofs_x + 4 - assert ofs_z == ofs_x + 8 + assert ofs_y == ofs_x + WORD + assert ofs_z == ofs_x + (WORD*2) def test_struct_size(): ofs_z, size_z = get_field_token(S, 'z', False) totalsize = get_size(S, False) - assert totalsize == ofs_z + 4 + assert totalsize == ofs_z + WORD def test_primitive_size(): - assert get_size(lltype.Signed, False) == 4 + assert get_size(lltype.Signed, False) == WORD assert get_size(lltype.Char, False) == 1 - assert get_size(lltype.Ptr(S), False) == 4 + assert get_size(lltype.Ptr(S), False) == WORD def test_array_token(): A = lltype.GcArray(lltype.Char) basesize, itemsize, ofs_length = get_array_token(A, False) - assert basesize >= 4 # at least the 'length', maybe some gc headers + assert basesize >= WORD # at least the 'length', maybe some gc headers assert itemsize == 1 - assert ofs_length == basesize - 4 + assert ofs_length == basesize - WORD A = lltype.GcArray(lltype.Signed) basesize, itemsize, ofs_length = get_array_token(A, False) - assert basesize >= 4 # at least the 'length', maybe some gc headers - assert itemsize == 4 - assert ofs_length == basesize - 4 + assert basesize >= WORD # at least the 'length', maybe some gc headers + assert itemsize == WORD + assert ofs_length == basesize - WORD def test_varsized_struct_size(): S1 = lltype.GcStruct('S1', ('parent', S), @@ -54,9 +55,9 @@ ofs_extra, size_extra = get_field_token(S1, 'extra', False) basesize, itemsize, ofs_length = get_array_token(S1, False) assert size_parent == ofs_extra - assert size_extra == 4 - assert ofs_length == ofs_extra + 4 - assert basesize == ofs_length + 4 + assert size_extra == WORD + assert ofs_length == ofs_extra + WORD + assert basesize == ofs_length + WORD assert itemsize == 1 def test_string(): Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zll_random.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zll_random.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zll_random.py Thu Sep 9 01:00:13 2010 @@ -1,9 +1,11 @@ from pypy.jit.backend.test.test_random import check_random_function, Random from pypy.jit.backend.test.test_ll_random import LLtypeOperationBuilder -from pypy.jit.backend.x86.runner import CPU386 +from pypy.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() def test_stress(): - cpu = CPU386(None, None) + cpu = CPU(None, None) r = Random() for i in range(1000): check_random_function(cpu, LLtypeOperationBuilder, r, i, 1000) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py Thu Sep 9 01:00:13 2010 @@ -17,6 +17,8 @@ from pypy.jit.backend.llsupport.gc import GcRefList, GcRootMap_asmgcc from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.tool.udir import udir +from pypy.jit.backend.x86.arch import IS_X86_64 +import py.test class X(object): def __init__(self, x=0): Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py 
============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py Thu Sep 9 01:00:13 2010 @@ -3,13 +3,14 @@ from pypy.rlib.jit import JitDriver, OPTIMIZER_FULL, unroll_parameters from pypy.rlib.jit import PARAMETERS, dont_look_inside from pypy.jit.metainterp.jitprof import Profiler -from pypy.jit.backend.x86.runner import CPU386 +from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.test.support import CCompiledMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.translator.translator import TranslationContext +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 class TestTranslationX86(CCompiledMixin): - CPUClass = CPU386 + CPUClass = getcpuclass() def _check_cbuilder(self, cbuilder): # We assume here that we have sse2. If not, the CPUClass @@ -74,8 +75,7 @@ driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno: str(codeno)) class SomewhereElse(object): pass @@ -114,7 +114,7 @@ class TestTranslationRemoveTypePtrX86(CCompiledMixin): - CPUClass = CPU386 + CPUClass = getcpuclass() def _get_TranslationContext(self): t = TranslationContext() Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/tool/viewcode.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/tool/viewcode.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/tool/viewcode.py Thu Sep 9 01:00:13 2010 @@ -31,16 +31,23 @@ if sys.platform == "win32": XXX # lots more in Psyco -def machine_code_dump(data, originaddr): - # the disassembler to use. 'objdump' writes GNU-style instructions. - # 'ndisasm' would use Intel syntax, but you need to fix the output parsing. 
- objdump = ('objdump -M intel -b binary -m i386 ' +def machine_code_dump(data, originaddr, backend_name): + objdump_backend_option = { + 'x86': 'i386', + 'x86_64': 'x86-64', + 'i386': 'i386', + } + objdump = ('objdump -M intel,%(backend)s -b binary -m i386 ' '--adjust-vma=%(origin)d -D %(file)s') # f = open(tmpfile, 'wb') f.write(data) f.close() - g = os.popen(objdump % {'file': tmpfile, 'origin': originaddr}, 'r') + g = os.popen(objdump % { + 'file': tmpfile, + 'origin': originaddr, + 'backend': objdump_backend_option[backend_name], + }, 'r') result = g.readlines() g.close() return result[6:] # drop some objdump cruft @@ -126,7 +133,7 @@ def disassemble(self): if not hasattr(self, 'text'): - lines = machine_code_dump(self.data, self.addr) + lines = machine_code_dump(self.data, self.addr, self.world.backend_name) # instead of adding symbol names in the dumps we could # also make the 0xNNNNNNNN addresses be red and show the # symbol name when the mouse is over them @@ -171,10 +178,13 @@ self.jumps = {} self.symbols = {} self.logentries = {} + self.backend_name = None def parse(self, f, textonly=True): for line in f: - if line.startswith('CODE_DUMP '): + if line.startswith('BACKEND '): + self.backend_name = line.split(' ')[1].strip() + elif line.startswith('CODE_DUMP '): pieces = line.split() assert pieces[1].startswith('@') assert pieces[2].startswith('+') Modified: pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py Thu Sep 9 01:00:13 2010 @@ -53,6 +53,7 @@ self.liveness = {} self.startpoints = set() self.alllabels = set() + self.resulttypes = {} def emit_reg(self, reg): if reg.index >= self.count_regs[reg.kind]: @@ -165,7 +166,9 @@ raise NotImplementedError(x) # opname = insn[0] - assert '>' not in argcodes or argcodes.index('>') == len(argcodes) - 2 + if '>' in argcodes: + assert argcodes.index('>') == len(argcodes) - 2 + self.resulttypes[len(self.code)] = argcodes[-1] key = opname + '/' + ''.join(argcodes) num = self.insns.setdefault(key, len(self.insns)) self.code[startposition] = chr(num) @@ -212,7 +215,8 @@ self.count_regs['float'], liveness=self.liveness, startpoints=self.startpoints, - alllabels=self.alllabels) + alllabels=self.alllabels, + resulttypes=self.resulttypes) def see_raw_object(self, value): if value._obj not in self._seen_raw_objects: Modified: pypy/branch/fast-forward/pypy/jit/codewriter/jitcode.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/jitcode.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/jitcode.py Thu Sep 9 01:00:13 2010 @@ -19,7 +19,8 @@ def setup(self, code='', constants_i=[], constants_r=[], constants_f=[], num_regs_i=255, num_regs_r=255, num_regs_f=255, - liveness=None, startpoints=None, alllabels=None): + liveness=None, startpoints=None, alllabels=None, + resulttypes=None): self.code = code # if the following lists are empty, use a single shared empty list self.constants_i = constants_i or self._empty_i @@ -33,6 +34,7 @@ self.liveness = make_liveness_cache(liveness) self._startpoints = startpoints # debugging self._alllabels = alllabels # debugging + self._resulttypes = resulttypes # debugging def get_fnaddr_as_int(self): return heaptracker.adr2int(self.fnaddr) Modified: pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py 
============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py Thu Sep 9 01:00:13 2010 @@ -511,14 +511,11 @@ arraydescr) return [] # check for deepfrozen structures that force constant-folding - hints = v_inst.concretetype.TO._hints - accessor = hints.get("immutable_fields") - if accessor and c_fieldname.value in accessor.fields: + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: pure = '_pure' - if accessor.fields[c_fieldname.value] == "[*]": + if immut == "[*]": self.immutable_arrays[op.result] = True - elif hints.get('immutable'): - pure = '_pure' else: pure = '' argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') @@ -829,13 +826,20 @@ self.make_three_lists(op.args[2:2+num_green_args]) + self.make_three_lists(op.args[2+num_green_args:])) op1 = SpaceOperation('jit_merge_point', args, None) - return ops + [op1] + op2 = SpaceOperation('-live-', [], None) + # ^^^ we need a -live- for the case of do_recursive_call() + return ops + [op1, op2] - def handle_jit_marker__can_enter_jit(self, op, jitdriver): + def handle_jit_marker__loop_header(self, op, jitdriver): jd = self.callcontrol.jitdriver_sd_from_jitdriver(jitdriver) assert jd is not None c_index = Constant(jd.index, lltype.Signed) - return SpaceOperation('can_enter_jit', [c_index], None) + return SpaceOperation('loop_header', [c_index], None) + + # a 'can_enter_jit' in the source graph becomes a 'loop_header' + # operation in the transformed graph, as its only purpose in + # the transformed graph is to detect loops. + handle_jit_marker__can_enter_jit = handle_jit_marker__loop_header def rewrite_op_debug_assert(self, op): log.WARNING("found debug_assert in %r; should have be removed" % Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_flatten.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_flatten.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_flatten.py Thu Sep 9 01:00:13 2010 @@ -593,7 +593,8 @@ -live- %i0, %i1 int_guard_value %i0 jit_merge_point $27, I[%i0], R[], F[], I[%i1], R[], F[] - can_enter_jit $27 + -live- + loop_header $27 void_return """, transform=True, liveness=True, cc=MyFakeCallControl(), jd=jd) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py Thu Sep 9 01:00:13 2010 @@ -2,7 +2,7 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import debug_start, debug_stop -from pypy.rlib.debug import make_sure_not_resized +from pypy.rlib.debug import make_sure_not_resized, fatalerror from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLException @@ -756,11 +756,15 @@ assert e reraise(e) + @arguments("r") + def bhimpl_debug_fatalerror(msg): + llop.debug_fatalerror(lltype.Void, msg) + # ---------- # the main hints and recursive calls @arguments("i") - def bhimpl_can_enter_jit(jdindex): + def bhimpl_loop_header(jdindex): pass @arguments("self", "i", "I", "R", "F", "I", "R", "F") @@ -1164,7 +1168,7 @@ 
# we now proceed to interpret the bytecode in this frame self.run() # - except JitException: + except JitException, e: raise # go through except Exception, e: # if we get an exception, return it to the caller frame @@ -1266,6 +1270,33 @@ e = lltype.cast_opaque_ptr(llmemory.GCREF, e) raise sd.ExitFrameWithExceptionRef(self.cpu, e) + def _handle_jitexception_in_portal(self, e): + # This case is really rare, but can occur if + # convert_and_run_from_pyjitpl() gets called in this situation: + # + # [function 1] <---- top BlackholeInterpreter() + # [recursive portal jit code] + # ... + # [bottom portal jit code] <---- bottom BlackholeInterpreter() + # + # and then "function 1" contains a call to "function 2", which + # calls "can_enter_jit". The latter can terminate by raising a + # JitException. In that case, the JitException is not supposed + # to fall through the whole chain of BlackholeInterpreters, but + # be caught and handled just below the level "recursive portal + # jit code". The present function is called to handle the case + # of recursive portal jit codes. + for jd in self.builder.metainterp_sd.jitdrivers_sd: + if jd.mainjitcode is self.jitcode: + break + else: + assert 0, "portal jitcode not found??" + # call the helper in warmspot.py. It might either raise a + # regular exception (which should then be propagated outside + # of 'self', not caught inside), or return (the return value + # gets stored in nextblackholeinterp). + jd.handle_jitexc_from_bh(self.nextblackholeinterp, e) + def _copy_data_from_miframe(self, miframe): self.setposition(miframe.jitcode, miframe.pc) for i in range(self.jitcode.num_regs_i()): @@ -1287,9 +1318,31 @@ while True: try: current_exc = blackholeinterp._resume_mainloop(current_exc) - finally: - blackholeinterp.builder.release_interp(blackholeinterp) + except JitException, e: + blackholeinterp, current_exc = _handle_jitexception( + blackholeinterp, e) + blackholeinterp.builder.release_interp(blackholeinterp) + blackholeinterp = blackholeinterp.nextblackholeinterp + +def _handle_jitexception(blackholeinterp, jitexc): + # See comments in _handle_jitexception_in_portal(). + while not blackholeinterp.jitcode.is_portal: + blackholeinterp.builder.release_interp(blackholeinterp) blackholeinterp = blackholeinterp.nextblackholeinterp + if blackholeinterp.nextblackholeinterp is None: + blackholeinterp.builder.release_interp(blackholeinterp) + raise jitexc # bottommost entry: go through + # We have reached a recursive portal level. + try: + blackholeinterp._handle_jitexception_in_portal(jitexc) + except Exception, e: + # It raised a general exception (it should not be a JitException here). + lle = get_llexception(blackholeinterp.cpu, e) + else: + # It set up the nextblackholeinterp to contain the return value. + lle = lltype.nullptr(rclass.OBJECTPTR.TO) + # We will continue to loop in _run_forever() from the parent level. 
+ return blackholeinterp, lle def resume_in_blackhole(metainterp_sd, jitdriver_sd, resumedescr, all_virtuals=None): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/executor.py Thu Sep 9 01:00:13 2010 @@ -91,7 +91,7 @@ return BoxInt(cpu.bh_getarrayitem_gc_i(arraydescr, array, index)) def do_getarrayitem_raw(cpu, _, arraybox, indexbox, arraydescr): - array = arraybox.getref_base() + array = arraybox.getint() index = indexbox.getint() assert not arraydescr.is_array_of_pointers() if arraydescr.is_array_of_floats(): @@ -172,34 +172,36 @@ [x1box.getref_base(), x2box.getref_base()], None) def do_int_add_ovf(cpu, metainterp, box1, box2): - assert metainterp is not None + # the overflow operations can be called without a metainterp, if an + # overflow cannot occur a = box1.getint() b = box2.getint() try: z = ovfcheck(a + b) except OverflowError: + assert metainterp is not None metainterp.execute_raised(OverflowError(), constant=True) z = 0 return BoxInt(z) def do_int_sub_ovf(cpu, metainterp, box1, box2): - assert metainterp is not None a = box1.getint() b = box2.getint() try: z = ovfcheck(a - b) except OverflowError: + assert metainterp is not None metainterp.execute_raised(OverflowError(), constant=True) z = 0 return BoxInt(z) def do_int_mul_ovf(cpu, metainterp, box1, box2): - assert metainterp is not None a = box1.getint() b = box2.getint() try: z = ovfcheck(a * b) except OverflowError: + assert metainterp is not None metainterp.execute_raised(OverflowError(), constant=True) z = 0 return BoxInt(z) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/history.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/history.py Thu Sep 9 01:00:13 2010 @@ -919,11 +919,12 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns - def check_loops(self, expected=None, **check): + def check_loops(self, expected=None, everywhere=False, **check): insns = {} for loop in self.loops: - if getattr(loop, '_ignore_during_counting', False): - continue + if not everywhere: + if getattr(loop, '_ignore_during_counting', False): + continue insns = loop.summary(adding_insns=insns) if expected is not None: insns.pop('debug_merge_point', None) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py Thu Sep 9 01:00:13 2010 @@ -12,6 +12,7 @@ # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.warmstate ... pypy.jit.metainterp.warmspot + # self.handle_jitexc_from_bh pypy.jit.metainterp.warmspot # self.index ... pypy.jit.codewriter.call # self.mainjitcode ... 
pypy.jit.codewriter.call Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt.py Thu Sep 9 01:00:13 2010 @@ -1,7 +1,7 @@ from pypy.jit.metainterp.history import Box, BoxInt, LoopToken, BoxFloat,\ ConstFloat from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstObj, REF -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, opboolinvers, opboolreflex from pypy.jit.metainterp import jitprof from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode @@ -18,6 +18,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int + def optimize_loop_1(metainterp_sd, loop): """Optimize loop.operations to make it match the input of loop.specnodes and to remove internal overheadish operations. Note that loop.specnodes @@ -525,7 +526,9 @@ def propagate_forward(self): self.exception_might_have_happened = False self.newoperations = [] - for op in self.loop.operations: + self.i = 0 + while self.i < len(self.loop.operations): + op = self.loop.operations[self.i] opnum = op.opnum for value, func in optimize_ops: if opnum == value: @@ -533,6 +536,7 @@ break else: self.optimize_default(op) + self.i += 1 self.loop.operations = self.newoperations # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) @@ -587,7 +591,12 @@ descr.make_a_counter_per_value(op) def optimize_default(self, op): - if op.is_always_pure(): + canfold = op.is_always_pure() + is_ovf = op.is_ovf() + if is_ovf: + nextop = self.loop.operations[self.i + 1] + canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW + if canfold: for arg in op.args: if self.get_constant_box(arg) is None: break @@ -597,6 +606,8 @@ resbox = execute_nonspec(self.cpu, None, op.opnum, argboxes, op.descr) self.make_constant(op.result, resbox.constbox()) + if is_ovf: + self.i += 1 # skip next operation, it is the unneeded guard return # did we do the exact same operation already? 
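[Editorial aside, not part of the patch.] The hunk above teaches optimize_default() to fold an overflow-checked operation together with the GUARD_NO_OVERFLOW that follows it whenever all arguments are constants. The plain-Python sketch below illustrates the idea; the int_add_ovf helper here is a hypothetical stand-in for ovfcheck(a + b), not PyPy's real implementation.

```python
# Minimal sketch, assuming a machine-word bound of sys.maxsize.
# With constant inputs, whether an overflow happens is already known at
# optimization time, so both the *_ovf op and its guard can be removed.
import sys

MAXINT = sys.maxsize  # illustrative stand-in for the machine-word bound

def int_add_ovf(a, b):
    """Addition that raises OverflowError, in the spirit of ovfcheck(a + b)."""
    result = a + b
    if not (-MAXINT - 1 <= result <= MAXINT):
        raise OverflowError
    return result

try:
    folded = int_add_ovf(2, 3)   # constant result: 5
except OverflowError:
    folded = None                # optimizer would keep both operations
else:
    pass                         # ...otherwise the guard_no_overflow is dropped too
assert folded == 5
```

This corresponds to the new test_constant_propagate_ovf test further down in test_optimizeopt.py, where int_add_ovf(2, 3) plus its guard collapses to the constant 5.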
@@ -610,6 +621,10 @@ if oldop is not None and oldop.descr is op.descr: assert oldop.opnum == op.opnum self.make_equal_to(op.result, self.getvalue(oldop.result)) + if is_ovf: + self.i += 1 # skip next operation, it is the unneeded guard + return + elif self.find_rewritable_bool(op, args): return else: self.pure_operations[args] = op @@ -617,6 +632,51 @@ # otherwise, the operation remains self.emit_operation(op) + + def try_boolinvers(self, op, targs): + oldop = self.pure_operations.get(targs, None) + if oldop is not None and oldop.descr is op.descr: + value = self.getvalue(oldop.result) + if value.is_constant(): + if value.box is CONST_1: + self.make_constant(op.result, CONST_0) + return True + elif value.box is CONST_0: + self.make_constant(op.result, CONST_1) + return True + return False + + + def find_rewritable_bool(self, op, args): + try: + oldopnum = opboolinvers[op.opnum] + targs = [args[0], args[1], ConstInt(oldopnum)] + if self.try_boolinvers(op, targs): + return True + except KeyError: + pass + + try: + oldopnum = opboolreflex[op.opnum] + targs = [args[1], args[0], ConstInt(oldopnum)] + oldop = self.pure_operations.get(targs, None) + if oldop is not None and oldop.descr is op.descr: + self.make_equal_to(op.result, self.getvalue(oldop.result)) + return True + except KeyError: + pass + + try: + oldopnum = opboolinvers[opboolreflex[op.opnum]] + targs = [args[1], args[0], ConstInt(oldopnum)] + if self.try_boolinvers(op, targs): + return True + except KeyError: + pass + + return False + + def optimize_JUMP(self, op): orgop = self.loop.operations[-1] exitargs = [] @@ -992,6 +1052,25 @@ self.make_equal_to(op.result, v1) else: self.optimize_default(op) + + def optimize_INT_SUB(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + return self.optimize_default(op) + + def optimize_INT_ADD(self, op): + v1 = self.getvalue(op.args[0]) + v2 = self.getvalue(op.args[1]) + # If one side of the op is 0 the result is the other side. 
+ if v1.is_constant() and v1.box.getint() == 0: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.optimize_default(op) optimize_ops = _findall(Optimizer, 'optimize_') Modified: pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py Thu Sep 9 01:00:13 2010 @@ -149,16 +149,20 @@ assert oldbox not in registers[count:] def make_result_of_lastop(self, resultbox): - if resultbox is None: - return + got_type = resultbox.type + if not we_are_translated(): + typeof = {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT} + assert typeof[self.jitcode._resulttypes[self.pc]] == got_type target_index = ord(self.bytecode[self.pc-1]) - if resultbox.type == history.INT: + if got_type == history.INT: self.registers_i[target_index] = resultbox - elif resultbox.type == history.REF: + elif got_type == history.REF: #debug_print(' ->', # llmemory.cast_ptr_to_adr(resultbox.getref_base())) self.registers_r[target_index] = resultbox - elif resultbox.type == history.FLOAT: + elif got_type == history.FLOAT: self.registers_f[target_index] = resultbox else: raise AssertionError("bad result box type") @@ -685,11 +689,11 @@ def _opimpl_recursive_call(self, jdindex, greenboxes, redboxes): targetjitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] allboxes = greenboxes + redboxes - portal_code = targetjitdriver_sd.mainjitcode warmrunnerstate = targetjitdriver_sd.warmstate token = None if warmrunnerstate.inlining: if warmrunnerstate.can_inline_callable(greenboxes): + portal_code = targetjitdriver_sd.mainjitcode return self.metainterp.perform_call(portal_code, allboxes, greenkey=greenboxes) token = warmrunnerstate.get_assembler_token(greenboxes) @@ -697,6 +701,10 @@ # that assembler that we call is still correct self.verify_green_args(targetjitdriver_sd, greenboxes) # + return self.do_recursive_call(targetjitdriver_sd, allboxes, token) + + def do_recursive_call(self, targetjitdriver_sd, allboxes, token=None): + portal_code = targetjitdriver_sd.mainjitcode k = targetjitdriver_sd.portal_runner_adr funcbox = ConstInt(heaptracker.adr2int(k)) return self.do_residual_call(funcbox, portal_code.calldescr, @@ -786,13 +794,8 @@ return clsbox @arguments("int") - def opimpl_can_enter_jit(self, jdindex): - if self.metainterp.in_recursion: - from pypy.jit.metainterp.warmspot import CannotInlineCanEnterJit - raise CannotInlineCanEnterJit() - assert jdindex == self.metainterp.jitdriver_sd.index, ( - "found a can_enter_jit that does not match the current jitdriver") - self.metainterp.seen_can_enter_jit = True + def opimpl_loop_header(self, jdindex): + self.metainterp.seen_loop_header_for_jdindex = jdindex def verify_green_args(self, jitdriver_sd, varargs): num_green_args = jitdriver_sd.num_green_args @@ -806,22 +809,42 @@ self.verify_green_args(jitdriver_sd, greenboxes) # xxx we may disable the following line in some context later self.debug_merge_point(jitdriver_sd, greenboxes) - if self.metainterp.seen_can_enter_jit: - self.metainterp.seen_can_enter_jit = False - # Assert that it's impossible to arrive here with in_recursion - # set to a non-zero value: seen_can_enter_jit can only be set - # to True by opimpl_can_enter_jit, which should be executed - # just before opimpl_jit_merge_point (no recursion inbetween). 
- assert not self.metainterp.in_recursion + if self.metainterp.seen_loop_header_for_jdindex < 0: + return + # + assert self.metainterp.seen_loop_header_for_jdindex == jdindex, ( + "found a loop_header for a JitDriver that does not match " + "the following jit_merge_point's") + self.metainterp.seen_loop_header_for_jdindex = -1 + # + if not self.metainterp.in_recursion: assert jitdriver_sd is self.metainterp.jitdriver_sd # Set self.pc to point to jit_merge_point instead of just after: - # if reached_can_enter_jit() raises SwitchToBlackhole, then the + # if reached_loop_header() raises SwitchToBlackhole, then the # pc is still at the jit_merge_point, which is a point that is # much less expensive to blackhole out of. saved_pc = self.pc self.pc = orgpc - self.metainterp.reached_can_enter_jit(greenboxes, redboxes) + self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc + else: + warmrunnerstate = jitdriver_sd.warmstate + token = warmrunnerstate.get_assembler_token(greenboxes) + # warning! careful here. We have to return from the current + # frame containing the jit_merge_point, and then use + # do_recursive_call() to follow the recursive call. This is + # needed because do_recursive_call() will write its result + # with make_result_of_lastop(), so the lastop must be right: + # it must be the call to 'self', and not the jit_merge_point + # itself, which has no result at all. + assert len(self.metainterp.framestack) >= 2 + try: + self.metainterp.finishframe(None) + except ChangeFrame: + pass + frame = self.metainterp.framestack[-1] + frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, token) + raise ChangeFrame def debug_merge_point(self, jitdriver_sd, greenkey): # debugging: produce a DEBUG_MERGE_POINT operation @@ -872,6 +895,12 @@ return exc_value_box @arguments("box") + def opimpl_debug_fatalerror(self, box): + from pypy.rpython.lltypesystem import rstr, lloperation + msg = box.getref(lltype.Ptr(rstr.STR)) + lloperation.llop.debug_fatalerror(msg) + + @arguments("box") def opimpl_virtual_ref(self, box): # Details on the content of metainterp.virtualref_boxes: # @@ -1018,9 +1047,10 @@ self.metainterp.clear_exception() resbox = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) - self.make_result_of_lastop(resbox) - # ^^^ this is done before handle_possible_exception() because we need - # the box to show up in get_list_of_active_boxes() + if resbox is not None: + self.make_result_of_lastop(resbox) + # ^^^ this is done before handle_possible_exception() because we + # need the box to show up in get_list_of_active_boxes() if exc: self.metainterp.handle_possible_exception() else: @@ -1323,7 +1353,8 @@ self.last_exc_value_box = None self.popframe() if self.framestack: - self.framestack[-1].make_result_of_lastop(resultbox) + if resultbox is not None: + self.framestack[-1].make_result_of_lastop(resultbox) raise ChangeFrame else: try: @@ -1552,7 +1583,7 @@ redkey = original_boxes[num_green_args:] self.resumekey = compile.ResumeFromInterpDescr(original_greenkey, redkey) - self.seen_can_enter_jit = False + self.seen_loop_header_for_jdindex = -1 try: self.interpret() except GenerateMergePoint, gmp: @@ -1579,7 +1610,7 @@ # because we cannot reconstruct the beginning of the proper loop self.current_merge_points = [(original_greenkey, -1)] self.resumekey = key - self.seen_can_enter_jit = False + self.seen_loop_header_for_jdindex = -1 try: self.prepare_resume_from_failure(key.guard_opnum) self.interpret() @@ -1609,7 +1640,7 @@ else: duplicates[box] = None 
- def reached_can_enter_jit(self, greenboxes, redboxes): + def reached_loop_header(self, greenboxes, redboxes): duplicates = {} self.remove_consts_and_duplicates(redboxes, len(redboxes), duplicates) @@ -1623,7 +1654,7 @@ live_arg_boxes += self.virtualizable_boxes live_arg_boxes.pop() assert len(self.virtualref_boxes) == 0, "missing virtual_ref_finish()?" - # Called whenever we reach the 'can_enter_jit' hint. + # Called whenever we reach the 'loop_header' hint. # First, attempt to make a bridge: # - if self.resumekey is a ResumeGuardDescr, it starts from a guard # that failed; @@ -2232,7 +2263,10 @@ else: resultbox = unboundmethod(self, *args) # - self.make_result_of_lastop(resultbox) + if resultbox is not None: + self.make_result_of_lastop(resultbox) + elif not we_are_translated(): + assert self._result_argcode in 'v?' # unboundmethod = getattr(MIFrame, 'opimpl_' + name).im_func argtypes = unrolling_iterable(unboundmethod.argtypes) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py Thu Sep 9 01:00:13 2010 @@ -274,3 +274,51 @@ setup(__name__ == '__main__') # print out the table when run directly del _oplist + +opboolinvers = { + rop.INT_EQ: rop.INT_NE, + rop.INT_NE: rop.INT_EQ, + rop.INT_LT: rop.INT_GE, + rop.INT_GE: rop.INT_LT, + rop.INT_GT: rop.INT_LE, + rop.INT_LE: rop.INT_GT, + + rop.UINT_LT: rop.UINT_GE, + rop.UINT_GE: rop.UINT_LT, + rop.UINT_GT: rop.UINT_LE, + rop.UINT_LE: rop.UINT_GT, + + rop.FLOAT_EQ: rop.FLOAT_NE, + rop.FLOAT_NE: rop.FLOAT_EQ, + rop.FLOAT_LT: rop.FLOAT_GE, + rop.FLOAT_GE: rop.FLOAT_LT, + rop.FLOAT_GT: rop.FLOAT_LE, + rop.FLOAT_LE: rop.FLOAT_GT, + + rop.PTR_EQ: rop.PTR_NE, + rop.PTR_NE: rop.PTR_EQ, + } + +opboolreflex = { + rop.INT_EQ: rop.INT_EQ, + rop.INT_NE: rop.INT_NE, + rop.INT_LT: rop.INT_GT, + rop.INT_GE: rop.INT_LE, + rop.INT_GT: rop.INT_LT, + rop.INT_LE: rop.INT_GE, + + rop.UINT_LT: rop.UINT_GT, + rop.UINT_GE: rop.UINT_LE, + rop.UINT_GT: rop.UINT_LT, + rop.UINT_LE: rop.UINT_GE, + + rop.FLOAT_EQ: rop.FLOAT_EQ, + rop.FLOAT_NE: rop.FLOAT_NE, + rop.FLOAT_LT: rop.FLOAT_GT, + rop.FLOAT_GE: rop.FLOAT_LE, + rop.FLOAT_GT: rop.FLOAT_LT, + rop.FLOAT_LE: rop.FLOAT_GE, + + rop.PTR_EQ: rop.PTR_EQ, + rop.PTR_NE: rop.PTR_NE, + } Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py Thu Sep 9 01:00:13 2010 @@ -116,8 +116,9 @@ class JitMixin: basic = True - def check_loops(self, expected=None, **check): - get_stats().check_loops(expected=expected, **check) + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) def check_loop_count(self, count): """NB. This is a hack; use check_tree_loop_count() or check_enter_count() for the real thing. 
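[Editorial aside, not part of the patch.] The opboolinvers and opboolreflex tables added to resoperation.py above encode two invariants that the optimizer's new find_rewritable_bool() relies on and that the new tests in test_executor.py (next file) verify on real ResOperations: the "invers" of a comparison negates its result on the same arguments, and the "reflex" gives the same result with the arguments swapped. The sketch below demonstrates the same properties with plain Python operators as a stand-in for the rop opcodes.

```python
# Illustrative sketch only -- plain Python comparisons instead of rop codes.
import operator

# "invers": same arguments, negated result
invers = {operator.lt: operator.ge, operator.ge: operator.lt,
          operator.gt: operator.le, operator.le: operator.gt,
          operator.eq: operator.ne, operator.ne: operator.eq}

# "reflex": swapped arguments, same result
reflex = {operator.lt: operator.gt, operator.gt: operator.lt,
          operator.le: operator.ge, operator.ge: operator.le,
          operator.eq: operator.eq, operator.ne: operator.ne}

for a in (1, 2, 3):
    for b in (1, 2, 3):
        for op in invers:
            assert invers[op](a, b) == (not op(a, b))
        for op in reflex:
            assert reflex[op](b, a) == op(a, b)
```

Given these invariants, a guard on one comparison lets the optimizer constant-fold or reuse any equivalent comparison written the "other way around", which is what the test_constant_boolrewrite_* tests in test_optimizeopt.py exercise.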
Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_executor.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_executor.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_executor.py Thu Sep 9 01:00:13 2010 @@ -4,14 +4,14 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.jit.metainterp.executor import execute from pypy.jit.metainterp.executor import execute_varargs, execute_nonspec -from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.resoperation import rop, opboolinvers, opboolreflex, opname from pypy.jit.metainterp.history import BoxInt, ConstInt from pypy.jit.metainterp.history import BoxPtr, ConstPtr from pypy.jit.metainterp.history import BoxFloat, ConstFloat from pypy.jit.metainterp.history import AbstractDescr, Box from pypy.jit.metainterp import history from pypy.jit.backend.model import AbstractCPU - +from pypy.rpython.lltypesystem import llmemory, rffi class FakeDescr(AbstractDescr): pass @@ -312,3 +312,40 @@ assert box.getint() == retvalue else: assert 0, "rettype is %r" % (rettype,) + +def make_args_for_op(op, a, b): + n=opname[op] + if n[0:3] == 'INT' or n[0:4] == 'UINT': + arg1 = ConstInt(a) + arg2 = ConstInt(b) + elif n[0:5] == 'FLOAT': + arg1 = ConstFloat(float(a)) + arg2 = ConstFloat(float(b)) + elif n[0:3] == 'PTR': + arg1 = ConstPtr(rffi.cast(llmemory.GCREF, a)) + arg2 = ConstPtr(rffi.cast(llmemory.GCREF, b)) + else: + raise NotImplementedError( + "Don't know how to make args for " + n) + return arg1, arg2 + + +def test_opboolinvers(): + cpu = FakeCPU() + for op1, op2 in opboolinvers.items(): + for a in (1,2,3): + for b in (1,2,3): + arg1, arg2 = make_args_for_op(op1, a, b) + box1 = execute(cpu, None, op1, None, arg1, arg2) + box2 = execute(cpu, None, op2, None, arg1, arg2) + assert box1.value == (not box2.value) + +def test_opboolreflex(): + cpu = FakeCPU() + for op1, op2 in opboolreflex.items(): + for a in (1,2,3): + for b in (1,2,3): + arg1, arg2 = make_args_for_op(op1, a, b) + box1 = execute(cpu, None, op1, None, arg1, arg2) + box2 = execute(cpu, None, op2, None, arg2, arg1) + assert box1.value == box2.value Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_immutable.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_immutable.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_immutable.py Thu Sep 9 01:00:13 2010 @@ -17,6 +17,38 @@ assert res == 28 self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, int_add=1) + def test_fields_subclass(self): + class X(object): + _immutable_fields_ = ["x"] + + def __init__(self, x): + self.x = x + + class Y(X): + _immutable_fields_ = ["y"] + + def __init__(self, x, y): + X.__init__(self, x) + self.y = y + + def f(x, y): + X(x) # force the field 'x' to be on class 'X' + z = Y(x, y) + return z.x + z.y + 5 + res = self.interp_operations(f, [23, 11]) + assert res == 39 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=2, + int_add=2) + + def f(x, y): + # this time, the field 'x' only shows up on subclass 'Y' + z = Y(x, y) + return z.x + z.y + 5 + res = self.interp_operations(f, [23, 11]) + assert res == 39 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=2, + int_add=2) + def test_array(self): class X(object): _immutable_fields_ = ["y[*]"] Modified: 
pypy/branch/fast-forward/pypy/jit/metainterp/test/test_jitdriver.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_jitdriver.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_jitdriver.py Thu Sep 9 01:00:13 2010 @@ -14,7 +14,6 @@ def test_simple(self): myjitdriver1 = JitDriver(greens=[], reds=['n', 'm'], - can_inline = lambda *args: False, get_printable_location = getloc1) myjitdriver2 = JitDriver(greens=['g'], reds=['r'], get_printable_location = getloc2) @@ -30,11 +29,14 @@ while r > 0: myjitdriver2.can_enter_jit(g=g, r=r) myjitdriver2.jit_merge_point(g=g, r=r) - r += loop1(r, g) - 1 + r += loop1(r, g) + (-1) return r # res = self.meta_interp(loop2, [4, 40], repeat=7, inline=True) assert res == loop2(4, 40) + # we expect only one int_sub, corresponding to the single + # compiled instance of loop1() + self.check_loops(int_sub=1) # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code @@ -42,11 +44,10 @@ self.check_tree_loop_count(4) # 2 x loop, 2 x enter bridge self.check_enter_count(7) - def test_simple_inline(self): + def test_inline(self): # this is not an example of reasonable code: loop1() is unrolled # 'n/m' times, where n and m are given as red arguments. myjitdriver1 = JitDriver(greens=[], reds=['n', 'm'], - can_inline = lambda *args: True, get_printable_location = getloc1) myjitdriver2 = JitDriver(greens=['g'], reds=['r'], get_printable_location = getloc2) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py Thu Sep 9 01:00:13 2010 @@ -9,6 +9,10 @@ class LoopTest(object): optimizer = OPTIMIZER_SIMPLE + automatic_promotion_result = { + 'int_add' : 6, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, + 'guard_value' : 3 + } def meta_interp(self, f, args, policy=None): return ll_meta_interp(f, args, optimizer=self.optimizer, @@ -477,9 +481,9 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == main_interpreter_loop(1) self.check_loop_count(1) - # XXX maybe later optimize guard_value away - self.check_loops({'int_add' : 6, 'int_gt' : 1, - 'guard_false' : 1, 'jump' : 1, 'guard_value' : 3}) + # These loops do different numbers of ops based on which optimizer we + # are testing with. 
+ self.check_loops(self.automatic_promotion_result) def test_can_enter_jit_outside_main_loop(self): myjitdriver = JitDriver(greens=[], reds=['i', 'j', 'a']) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_spec.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_spec.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_spec.py Thu Sep 9 01:00:13 2010 @@ -5,6 +5,10 @@ class LoopSpecTest(test_loop.LoopTest): optimizer = OPTIMIZER_FULL + automatic_promotion_result = { + 'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, + 'guard_value' : 1 + } # ====> test_loop.py Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 9 01:00:13 2010 @@ -285,6 +285,24 @@ """ self.optimize_loop(ops, '', expected) + def test_constant_propagate_ovf(self): + ops = """ + [] + i0 = int_add_ovf(2, 3) + guard_no_overflow() [] + i1 = int_is_true(i0) + guard_true(i1) [] + i2 = int_is_zero(i1) + guard_false(i2) [] + guard_value(i0, 5) [] + jump() + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, '', expected) + def test_constfold_all(self): from pypy.jit.backend.llgraph.llimpl import TYPES # xxx fish from pypy.jit.metainterp.executor import execute_nonspec @@ -364,6 +382,74 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_constant_boolrewrite_lt(self): + ops = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + i2 = int_ge(i0, 0) + guard_false(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_constant_boolrewrite_gt(self): + ops = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + i2 = int_le(i0, 0) + guard_false(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_constant_boolrewrite_reflex(self): + ops = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + i2 = int_lt(0, i0) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_constant_boolrewrite_reflex_invers(self): + ops = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + i2 = int_ge(0, i0) + guard_false(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + def test_remove_consecutive_guard_value_constfold(self): ops = """ [] @@ -411,7 +497,6 @@ self.optimize_loop(ops, 'Not', expected) def test_int_is_true_1(self): - py.test.skip("XXX implement me") ops = """ [i0] i1 = int_is_true(i0) @@ -806,16 +891,10 @@ guard_nonnull(p0) [] i7 = ptr_ne(p0, p1) guard_true(i7) [] - i8 = ptr_eq(p0, p1) - guard_false(i8) [] i9 = ptr_ne(p0, p2) guard_true(i9) [] - i10 = ptr_eq(p0, p2) - guard_false(i10) [] i11 = ptr_ne(p2, p1) guard_true(i11) [] - i12 = ptr_eq(p2, p1) - guard_false(i12) [] jump(p0, p1, p2) """ self.optimize_loop(ops, 'Not, Not, Not', expected2) @@ -2037,6 +2116,33 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_remove_duplicate_pure_op_ovf(self): + ops = """ + [i1] + i3 = 
int_add_ovf(i1, 1) + guard_no_overflow() [] + i3b = int_is_true(i3) + guard_true(i3b) [] + i4 = int_add_ovf(i1, 1) + guard_no_overflow() [] + i4b = int_is_true(i4) + guard_true(i4b) [] + escape(i3) + escape(i4) + jump(i1) + """ + expected = """ + [i1] + i3 = int_add_ovf(i1, 1) + guard_no_overflow() [] + i3b = int_is_true(i3) + guard_true(i3b) [] + escape(i3) + escape(i3) + jump(i1) + """ + self.optimize_loop(ops, "Not", expected) + def test_int_and_or_with_zero(self): ops = """ [i0, i1] @@ -2051,8 +2157,41 @@ jump(i1, i0) """ self.optimize_loop(ops, 'Not, Not', expected) - - + + def test_fold_partially_constant_ops(self): + ops = """ + [i0] + i1 = int_sub(i0, 0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add(i0, 0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add(0, i0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + # ---------- def make_fail_descr(self): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py Thu Sep 9 01:00:13 2010 @@ -2,10 +2,11 @@ from pypy.rlib.jit import JitDriver, we_are_jitted, OPTIMIZER_SIMPLE, hint from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.debug import fatalerror from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr -from pypy.jit.metainterp.warmspot import CannotInlineCanEnterJit, get_stats +from pypy.jit.metainterp.warmspot import get_stats class RecursiveTests: @@ -98,23 +99,18 @@ policy=StopAtXPolicy(opaque)) assert res == 1 - def get_interpreter(self, codes, always_inline=False): + def get_interpreter(self, codes): ADD = "0" JUMP_BACK = "1" CALL = "2" EXIT = "3" - if always_inline: - def can_inline(*args): - return True - else: - def can_inline(i, code): - code = hlstr(code) - return not JUMP_BACK in code + def getloc(i, code): + return 'code="%s", i=%d' % (code, i) jitdriver = JitDriver(greens = ['i', 'code'], reds = ['n'], - can_inline = can_inline) - + get_printable_location = getloc) + def interpret(codenum, n, i): code = codes[codenum] while i < len(code): @@ -162,31 +158,16 @@ assert self.meta_interp(f, [0, 0, 0], optimizer=OPTIMIZER_SIMPLE, inline=True) == 42 - self.check_loops(call_may_force = 1, call = 0) - - def test_inline_faulty_can_inline(self): - code = "021" - subcode = "301" - codes = [code, subcode] - - f = self.get_interpreter(codes, always_inline=True) - - try: - self.meta_interp(f, [0, 0, 0], optimizer=OPTIMIZER_SIMPLE, - inline=True) - except CannotInlineCanEnterJit: - pass - else: - py.test.fail("DID NOT RAISE") + # the call is fully inlined, because we jump to subcode[1], thus + # skipping completely the JUMP_BACK in subcode[0] + self.check_loops(call_may_force = 0, call_assembler = 0, call = 0) def test_guard_failure_in_inlined_function(self): def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p, 
can_inline=c) + get_printable_location=p) def f(code, n): pc = 0 while pc < len(code): @@ -219,10 +200,8 @@ def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n', 'flag'], - get_printable_location=p, can_inline=c) + get_printable_location=p) def f(code, n): pc = 0 flag = False @@ -262,10 +241,8 @@ def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p, can_inline=c) + get_printable_location=p) class Exc(Exception): pass @@ -307,10 +284,8 @@ def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p, can_inline=c) + get_printable_location=p) def f(code, n): pc = 0 @@ -524,10 +499,8 @@ def p(pc, code): code = hlstr(code) return "'%s' at %d: %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p, can_inline=c) + get_printable_location=p) def f(code, n): pc = 0 @@ -558,6 +531,8 @@ result += f('+-cl--', i) g(50) self.meta_interp(g, [50], backendopt=True) + py.test.skip("tracing from start is by now only longer enabled " + "if a trace gets too big") self.check_tree_loop_count(3) self.check_history(int_add=1) @@ -565,10 +540,8 @@ def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p, can_inline=c) + get_printable_location=p) def f(code, n): pc = 0 @@ -607,8 +580,7 @@ def test_directly_call_assembler(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def portal(codeno): i = 0 @@ -624,28 +596,29 @@ def test_recursion_cant_call_assembler_directly(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'j'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def portal(codeno, j): i = 0 - while i < 1: - driver.can_enter_jit(codeno=codeno, i=i, j=j) + while 1: driver.jit_merge_point(codeno=codeno, i=i, j=j) - i += 1 - if j == 0: + if i == 1: + if j == 0: + return + portal(2, j - 1) + elif i == 3: return - portal(2, j - 1) + i += 1 + driver.can_enter_jit(codeno=codeno, i=i, j=j) portal(2, 50) self.meta_interp(portal, [2, 20], inline=True) - self.check_history(call_assembler=0, call_may_force=1) - self.check_enter_count_at_most(1) + self.check_loops(call_assembler=0, call_may_force=1, + everywhere=True) def test_directly_call_assembler_return(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def portal(codeno): i = 0 @@ -668,8 +641,7 @@ self.x = x driver = JitDriver(greens = ['codeno'], reds = ['i'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def portal(codeno): i = 0 @@ -690,8 
+662,7 @@ def test_directly_call_assembler_fail_guard(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def portal(codeno, k): i = 0 @@ -722,8 +693,7 @@ driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def main(codeno): frame = Frame() @@ -761,8 +731,7 @@ driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) @dont_look_inside def check_frame(subframe): @@ -802,7 +771,7 @@ res = self.meta_interp(main, [0], inline=True) assert res == main(0) - def test_directly_call_assembler_virtualizable_force(self): + def test_directly_call_assembler_virtualizable_force1(self): class Thing(object): def __init__(self, val): self.val = val @@ -812,8 +781,7 @@ driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) class SomewhereElse(object): pass @@ -830,6 +798,7 @@ return frame.thing.val def portal(codeno, frame): + print 'ENTER:', codeno, frame.thing.val i = 0 while i < 10: driver.can_enter_jit(frame=frame, codeno=codeno, i=i) @@ -839,11 +808,15 @@ subframe = Frame() subframe.thing = Thing(nextval) nextval = portal(1, subframe) - elif frame.thing.val > 40: - change(Thing(13)) - nextval = 13 + elif codeno == 1: + if frame.thing.val > 40: + change(Thing(13)) + nextval = 13 + else: + fatalerror("bad codeno = " + str(codeno)) frame.thing = Thing(nextval + 1) i += 1 + print 'LEAVE:', codeno, frame.thing.val return frame.thing.val res = self.meta_interp(main, [0], inline=True, @@ -852,8 +825,7 @@ def test_directly_call_assembler_virtualizable_with_array(self): myjitdriver = JitDriver(greens = ['codeno'], reds = ['n', 'x', 'frame'], - virtualizables = ['frame'], - can_inline = lambda codeno : False) + virtualizables = ['frame']) class Frame(object): _virtualizable2_ = ['l[*]', 's'] @@ -899,8 +871,7 @@ driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) class SomewhereElse(object): pass @@ -942,17 +913,16 @@ def test_assembler_call_red_args(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], - get_printable_location = lambda codeno : str(codeno), - can_inline = lambda codeno : False) + get_printable_location = lambda codeno : str(codeno)) def residual(k): - if k > 40: + if k > 150: return 0 return 1 def portal(codeno, k): i = 0 - while i < 10: + while i < 15: driver.can_enter_jit(codeno=codeno, i=i, k=k) driver.jit_merge_point(codeno=codeno, i=i, k=k) if codeno == 2: @@ -969,10 +939,130 @@ assert res == portal(2, 0) self.check_loops(call_assembler=2) - # There is a test which I fail to write. 
- # * what happens if we call recursive_call while blackholing - # this seems to be completely corner case and not really happening - # in the wild + def test_inline_without_hitting_the_loop(self): + driver = JitDriver(greens = ['codeno'], reds = ['i'], + get_printable_location = lambda codeno : str(codeno)) + + def portal(codeno): + i = 0 + while True: + driver.jit_merge_point(codeno=codeno, i=i) + if codeno < 10: + i += portal(20) + codeno += 1 + elif codeno == 10: + if i > 63: + return i + codeno = 0 + driver.can_enter_jit(codeno=codeno, i=i) + else: + return 1 + + assert portal(0) == 70 + res = self.meta_interp(portal, [0], inline=True) + assert res == 70 + self.check_loops(call_assembler=0) + + def test_inline_with_hitting_the_loop_sometimes(self): + driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], + get_printable_location = lambda codeno : str(codeno)) + + def portal(codeno, k): + if k > 2: + return 1 + i = 0 + while True: + driver.jit_merge_point(codeno=codeno, i=i, k=k) + if codeno < 10: + i += portal(codeno + 5, k+1) + codeno += 1 + elif codeno == 10: + if i > [-1, 2000, 63][k]: + return i + codeno = 0 + driver.can_enter_jit(codeno=codeno, i=i, k=k) + else: + return 1 + + assert portal(0, 1) == 2095 + res = self.meta_interp(portal, [0, 1], inline=True) + assert res == 2095 + self.check_loops(call_assembler=6, everywhere=True) + + def test_inline_with_hitting_the_loop_sometimes_exc(self): + driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], + get_printable_location = lambda codeno : str(codeno)) + class GotValue(Exception): + def __init__(self, result): + self.result = result + + def portal(codeno, k): + if k > 2: + raise GotValue(1) + i = 0 + while True: + driver.jit_merge_point(codeno=codeno, i=i, k=k) + if codeno < 10: + try: + portal(codeno + 5, k+1) + except GotValue, e: + i += e.result + codeno += 1 + elif codeno == 10: + if i > [-1, 2000, 63][k]: + raise GotValue(i) + codeno = 0 + driver.can_enter_jit(codeno=codeno, i=i, k=k) + else: + raise GotValue(1) + + def main(codeno, k): + try: + portal(codeno, k) + except GotValue, e: + return e.result + + assert main(0, 1) == 2095 + res = self.meta_interp(main, [0, 1], inline=True) + assert res == 2095 + self.check_loops(call_assembler=6, everywhere=True) + + def test_handle_jitexception_in_portal(self): + # a test for _handle_jitexception_in_portal in blackhole.py + driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'], + get_printable_location = lambda codeno: str(codeno)) + def do_can_enter_jit(codeno, i, str): + i = (i+1)-1 # some operations + driver.can_enter_jit(codeno=codeno, i=i, str=str) + def intermediate(codeno, i, str): + if i == 9: + do_can_enter_jit(codeno, i, str) + def portal(codeno, str): + i = value.initial + while i < 10: + intermediate(codeno, i, str) + driver.jit_merge_point(codeno=codeno, i=i, str=str) + i += 1 + if codeno == 64 and i == 10: + str = portal(96, str) + str += chr(codeno+i) + return str + class Value: + initial = -1 + value = Value() + def main(): + value.initial = 0 + return (portal(64, '') + + portal(64, '') + + portal(64, '') + + portal(64, '') + + portal(64, '')) + assert main() == 'ABCDEFGHIabcdefghijJ' * 5 + for tlimit in [95, 90, 102]: + print 'tlimit =', tlimit + res = self.meta_interp(main, [], inline=True, trace_limit=tlimit) + assert ''.join(res.chars) == 'ABCDEFGHIabcdefghijJ' * 5 + class TestLLtype(RecursiveTests, LLJitMixin): pass Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualizable.py 
============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualizable.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualizable.py Thu Sep 9 01:00:13 2010 @@ -1330,11 +1330,9 @@ def p(pc, code): code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) - def c(pc, code): - return "l" not in hlstr(code) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['frame'], virtualizables=["frame"], - get_printable_location=p, can_inline=c) + get_printable_location=p) def f(code, frame): pc = 0 while pc < len(code): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmspot.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmspot.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmspot.py Thu Sep 9 01:00:13 2010 @@ -272,6 +272,30 @@ self.check_enter_count_at_most(2) self.check_loops(call=0) + def test_loop_header(self): + # artificial test: we enter into the JIT only when can_enter_jit() + # is seen, but we close a loop in the JIT much more quickly + # because of loop_header(). + mydriver = JitDriver(reds = ['n', 'm'], greens = []) + + def f(m): + n = 0 + while True: + mydriver.jit_merge_point(n=n, m=m) + if n > m: + m -= 1 + if m < 0: + return n + n = 0 + mydriver.can_enter_jit(n=n, m=m) + else: + n += 1 + mydriver.loop_header() + assert f(15) == 1 + res = self.meta_interp(f, [15], backendopt=True) + assert res == 1 + self.check_loops(int_add=1) # I get 13 without the loop_header() + class TestLLWarmspot(WarmspotTests, LLJitMixin): CPUClass = runner.LLtypeCPU Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py Thu Sep 9 01:00:13 2010 @@ -61,12 +61,14 @@ _green_args_spec = [lltype.Signed, lltype.Float] state = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = state._make_jitcell_getter_default() - cell1 = get_jitcell(42, 42.5) + cell1 = get_jitcell(True, 42, 42.5) assert isinstance(cell1, JitCell) - cell2 = get_jitcell(42, 42.5) + cell2 = get_jitcell(True, 42, 42.5) assert cell1 is cell2 - cell3 = get_jitcell(41, 42.5) - cell4 = get_jitcell(42, 0.25) + cell3 = get_jitcell(True, 41, 42.5) + assert get_jitcell(False, 42, 0.25) is None + cell4 = get_jitcell(True, 42, 0.25) + assert get_jitcell(False, 42, 0.25) is cell4 assert cell1 is not cell3 is not cell4 is not cell1 def test_make_jitcell_getter(): @@ -75,8 +77,8 @@ _get_jitcell_at_ptr = None state = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = state.make_jitcell_getter() - cell1 = get_jitcell(1.75) - cell2 = get_jitcell(1.75) + cell1 = get_jitcell(True, 1.75) + cell2 = get_jitcell(True, 1.75) assert cell1 is cell2 assert get_jitcell is state.make_jitcell_getter() @@ -103,14 +105,16 @@ # state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) get_jitcell = state._make_jitcell_getter_custom() - cell1 = get_jitcell(5, 42.5) + cell1 = get_jitcell(True, 5, 42.5) assert isinstance(cell1, JitCell) assert cell1.x == 5 assert cell1.y == 42.5 - cell2 = get_jitcell(5, 42.5) + cell2 = get_jitcell(True, 5, 42.5) assert cell2 is cell1 - cell3 = get_jitcell(41, 42.5) - cell4 = get_jitcell(42, 0.25) + cell3 = get_jitcell(True, 41, 42.5) + assert 
get_jitcell(False, 42, 0.25) is None + cell4 = get_jitcell(True, 42, 0.25) + assert get_jitcell(False, 42, 0.25) is cell4 assert cell1 is not cell3 is not cell4 is not cell1 def test_make_set_future_values(): @@ -153,52 +157,25 @@ state.attach_unoptimized_bridge_from_interp([ConstInt(5), ConstFloat(2.25)], "entry loop token") - cell1 = get_jitcell(5, 2.25) + cell1 = get_jitcell(True, 5, 2.25) assert cell1.counter < 0 assert cell1.entry_loop_token == "entry loop token" def test_make_jitdriver_callbacks_1(): class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] - _can_inline_ptr = None _get_printable_location_ptr = None _confirm_enter_jit_ptr = None class FakeCell: dont_trace_here = False state = WarmEnterState(None, FakeJitDriverSD()) - def jit_getter(*args): + def jit_getter(build, *args): return FakeCell() state.jit_getter = jit_getter state.make_jitdriver_callbacks() - res = state.can_inline_callable([ConstInt(5), ConstFloat(42.5)]) - assert res is True res = state.get_location_str([ConstInt(5), ConstFloat(42.5)]) assert res == '(no jitdriver.get_printable_location!)' -def test_make_jitdriver_callbacks_2(): - def can_inline(x, y): - assert x == 5 - assert y == 42.5 - return False - CAN_INLINE = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Float], - lltype.Bool)) - class FakeCell: - dont_trace_here = False - class FakeWarmRunnerDesc: - rtyper = None - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed, lltype.Float] - _can_inline_ptr = llhelper(CAN_INLINE, can_inline) - _get_printable_location_ptr = None - _confirm_enter_jit_ptr = None - state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - def jit_getter(*args): - return FakeCell() - state.jit_getter = jit_getter - state.make_jitdriver_callbacks() - res = state.can_inline_callable([ConstInt(5), ConstFloat(42.5)]) - assert res is False - def test_make_jitdriver_callbacks_3(): def get_location(x, y): assert x == 5 @@ -210,7 +187,6 @@ rtyper = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] - _can_inline_ptr = None _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None _get_jitcell_at_ptr = None @@ -231,7 +207,6 @@ rtyper = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] - _can_inline_ptr = None _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) _get_jitcell_at_ptr = None Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py Thu Sep 9 01:00:13 2010 @@ -37,15 +37,12 @@ return jitcellcache.entry def get_printable_location(): return '(hello world)' - def can_inline(): - return False jitdriver = JitDriver(greens = [], reds = ['total', 'frame'], virtualizables = ['frame'], get_jitcell_at=get_jitcell_at, set_jitcell_at=set_jitcell_at, - get_printable_location=get_printable_location, - can_inline=can_inline) + get_printable_location=get_printable_location) def f(i): for param in unroll_parameters: defl = PARAMETERS[param] @@ -63,8 +60,7 @@ frame.i -= 1 return total * 10 # - myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x'], - can_inline = lambda *args: False) + myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x']) def f2(g, m, x): while m > 0: myjitdriver2.can_enter_jit(g=g, m=m, 
x=x) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py Thu Sep 9 01:00:13 2010 @@ -136,9 +136,6 @@ class ContinueRunningNormallyBase(JitException): pass -class CannotInlineCanEnterJit(JitException): - pass - # ____________________________________________________________ class WarmRunnerDesc(object): @@ -402,7 +399,7 @@ can_inline = state.can_inline_greenargs num_green_args = jd.num_green_args def maybe_enter_from_start(*args): - if can_inline is not None and not can_inline(*args[:num_green_args]): + if not can_inline(*args[:num_green_args]): maybe_compile_and_run(*args) maybe_enter_from_start._always_inline_ = True jd._maybe_enter_from_start_fn = maybe_enter_from_start @@ -423,8 +420,6 @@ s_BaseJitCell_not_None) jd._get_jitcell_at_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_jitcell_at, s_BaseJitCell_or_None) - jd._can_inline_ptr = self._make_hook_graph(jd, - annhelper, jd.jitdriver.can_inline, annmodel.s_Bool) jd._get_printable_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_printable_location, s_Str) jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, @@ -478,7 +473,13 @@ jitdriver = op.args[1].value assert jitdriver in sublists, \ "can_enter_jit with no matching jit_merge_point" - sublists[jitdriver].append((graph, block, index)) + origportalgraph = jd._jit_merge_point_pos[0] + if graph is not origportalgraph: + sublists[jitdriver].append((graph, block, index)) + else: + pass # a 'can_enter_jit' before the 'jit-merge_point', but + # originally in the same function: we ignore it here + # see e.g. 
test_jitdriver.test_simple for jd in self.jitdrivers_sd: sublist = sublists[jd.jitdriver] assert len(sublist) > 0, \ @@ -557,6 +558,7 @@ # Prepare the portal_runner() helper # from pypy.jit.metainterp.warmstate import specialize_value + from pypy.jit.metainterp.warmstate import unspecialize_value portal_ptr = self.cpu.ts.functionptr(PORTALFUNC, 'portal', graph = portalgraph) jd._portal_ptr = portal_ptr @@ -611,6 +613,37 @@ value = cast_base_ptr_to_instance(Exception, value) raise Exception, value + def handle_jitexception(e): + # XXX the bulk of this function is a copy-paste from above :-( + try: + raise e + except self.ContinueRunningNormally, e: + args = () + for ARGTYPE, attrname, count in portalfunc_ARGS: + x = getattr(e, attrname)[count] + x = specialize_value(ARGTYPE, x) + args = args + (x,) + return ll_portal_runner(*args) + except self.DoneWithThisFrameVoid: + assert result_kind == 'void' + return + except self.DoneWithThisFrameInt, e: + assert result_kind == 'int' + return specialize_value(RESULT, e.result) + except self.DoneWithThisFrameRef, e: + assert result_kind == 'ref' + return specialize_value(RESULT, e.result) + except self.DoneWithThisFrameFloat, e: + assert result_kind == 'float' + return specialize_value(RESULT, e.result) + except self.ExitFrameWithExceptionRef, e: + value = ts.cast_to_baseclass(e.value) + if not we_are_translated(): + raise LLException(ts.get_typeptr(value), value) + else: + value = cast_base_ptr_to_instance(Exception, value) + raise Exception, value + jd._ll_portal_runner = ll_portal_runner # for debugging jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE, ll_portal_runner) @@ -631,32 +664,8 @@ vinfo.reset_vable_token(virtualizable) try: loop_token = fail_descr.handle_fail(self.metainterp_sd, jd) - except self.ContinueRunningNormally, e: - args = () - for ARGTYPE, attrname, count in portalfunc_ARGS: - x = getattr(e, attrname)[count] - x = specialize_value(ARGTYPE, x) - args = args + (x,) - return ll_portal_runner(*args) - except self.DoneWithThisFrameVoid: - assert result_kind == 'void' - return - except self.DoneWithThisFrameInt, e: - assert result_kind == 'int' - return specialize_value(RESULT, e.result) - except self.DoneWithThisFrameRef, e: - assert result_kind == 'ref' - return specialize_value(RESULT, e.result) - except self.DoneWithThisFrameFloat, e: - assert result_kind == 'float' - return specialize_value(RESULT, e.result) - except self.ExitFrameWithExceptionRef, e: - value = ts.cast_to_baseclass(e.value) - if not we_are_translated(): - raise LLException(ts.get_typeptr(value), value) - else: - value = cast_base_ptr_to_instance(Exception, value) - raise Exception, value + except JitException, e: + return handle_jitexception(e) fail_descr = self.cpu.execute_token(loop_token) jd._assembler_call_helper = assembler_call_helper # for debugging @@ -668,6 +677,21 @@ if vinfo is not None: jd.vable_token_descr = vinfo.vable_token_descr + def handle_jitexception_from_blackhole(bhcaller, e): + result = handle_jitexception(e) + # + if result_kind != 'void': + result = unspecialize_value(result) + if result_kind == 'int': + bhcaller._setup_return_value_i(result) + elif result_kind == 'ref': + bhcaller._setup_return_value_r(result) + elif result_kind == 'float': + bhcaller._setup_return_value_f(result) + else: + assert False + jd.handle_jitexc_from_bh = handle_jitexception_from_blackhole + # ____________________________________________________________ # Now mutate origportalgraph to end with a call to portal_runner_ptr # @@ -687,17 +711,6 @@ 
origblock.exitswitch = None origblock.recloseblock(Link([v_result], origportalgraph.returnblock)) # - # Also kill any can_enter_jit left behind (example: see - # test_jitdriver.test_simple, which has a can_enter_jit in - # loop1's origportalgraph) - can_enter_jits = _find_jit_marker([origportalgraph], 'can_enter_jit') - for _, block, i in can_enter_jits: - op = block.operations[i] - assert op.opname == 'jit_marker' - block.operations[i] = SpaceOperation('same_as', - [Constant(None, lltype.Void)], - op.result) - # checkgraph(origportalgraph) def add_finish(self): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py Thu Sep 9 01:00:13 2010 @@ -30,6 +30,22 @@ else: return lltype.cast_opaque_ptr(TYPE, x) + at specialize.ll() +def unspecialize_value(value): + """Casts 'value' to a Signed, a GCREF or a Float.""" + if isinstance(lltype.typeOf(value), lltype.Ptr): + if lltype.typeOf(value).TO._gckind == 'gc': + return lltype.cast_opaque_ptr(llmemory.GCREF, value) + else: + adr = llmemory.cast_ptr_to_adr(value) + return heaptracker.adr2int(adr) + elif isinstance(lltype.typeOf(value), ootype.OOType): + return ootype.cast_to_object(value) + elif isinstance(value, float): + return value + else: + return intmask(value) + @specialize.arg(0) def unwrap(TYPE, box): if TYPE is lltype.Void: @@ -232,7 +248,7 @@ # look for the cell corresponding to the current greenargs greenargs = args[:num_green_args] - cell = get_jitcell(*greenargs) + cell = get_jitcell(True, *greenargs) if cell.counter >= 0: # update the profiling counter @@ -324,7 +340,7 @@ # def jit_cell_at_key(greenkey): greenargs = unwrap_greenkey(greenkey) - return jit_getter(*greenargs) + return jit_getter(True, *greenargs) self.jit_cell_at_key = jit_cell_at_key self.jit_getter = jit_getter # @@ -355,10 +371,12 @@ # jitcell_dict = r_dict(comparekey, hashkey) # - def get_jitcell(*greenargs): + def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: + if not build: + return None cell = JitCell() jitcell_dict[greenargs] = cell return cell @@ -371,38 +389,41 @@ set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} # - def get_jitcell(*greenargs): + def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) cellref = fn(*greenargs) # if we_are_translated(): BASEJITCELL = lltype.typeOf(cellref) cell = cast_base_ptr_to_instance(JitCell, cellref) - elif isinstance(cellref, (BaseJitCell, type(None))): - BASEJITCELL = None - cell = cellref else: - BASEJITCELL = lltype.typeOf(cellref) - if cellref: - cell = lltohlhack[rtyper.type_system.deref(cellref)] + if isinstance(cellref, (BaseJitCell, type(None))): + BASEJITCELL = None + cell = cellref else: - cell = None - # + BASEJITCELL = lltype.typeOf(cellref) + if cellref: + cell = lltohlhack[rtyper.type_system.deref(cellref)] + else: + cell = None + if not build: + return cell if cell is None: cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) - elif BASEJITCELL is None: - cellref = cell else: - if isinstance(BASEJITCELL, lltype.Ptr): - cellref = lltype.malloc(BASEJITCELL.TO) - elif isinstance(BASEJITCELL, ootype.Instance): - cellref = ootype.new(BASEJITCELL) + if BASEJITCELL is None: + cellref = cell else: - assert False, "no clue" - 
lltohlhack[rtyper.type_system.deref(cellref)] = cell + if isinstance(BASEJITCELL, lltype.Ptr): + cellref = lltype.malloc(BASEJITCELL.TO) + elif isinstance(BASEJITCELL, ootype.Instance): + cellref = ootype.new(BASEJITCELL) + else: + assert False, "no clue" + lltohlhack[rtyper.type_system.deref(cellref)] = cell # fn = support.maybe_on_top_of_llinterp(rtyper, set_jitcell_at_ptr) @@ -468,34 +489,24 @@ if hasattr(self, 'get_location_str'): return # - can_inline_ptr = self.jitdriver_sd._can_inline_ptr unwrap_greenkey = self.make_unwrap_greenkey() jit_getter = self.make_jitcell_getter() - if can_inline_ptr is None: - def can_inline_callable(*greenargs): - # XXX shouldn't it be False by default? - return True - else: - rtyper = self.warmrunnerdesc.rtyper - # - def can_inline_callable(*greenargs): - fn = support.maybe_on_top_of_llinterp(rtyper, can_inline_ptr) - return fn(*greenargs) - def can_inline(*greenargs): - cell = jit_getter(*greenargs) - if cell.dont_trace_here: + + def can_inline_greenargs(*greenargs): + cell = jit_getter(False, *greenargs) + if cell is not None and cell.dont_trace_here: return False - return can_inline_callable(*greenargs) - self.can_inline_greenargs = can_inline - def can_inline_greenkey(greenkey): + return True + def can_inline_callable(greenkey): greenargs = unwrap_greenkey(greenkey) - return can_inline(*greenargs) - self.can_inline_callable = can_inline_greenkey + return can_inline_greenargs(*greenargs) + self.can_inline_greenargs = can_inline_greenargs + self.can_inline_callable = can_inline_callable def get_assembler_token(greenkey): greenargs = unwrap_greenkey(greenkey) - cell = jit_getter(*greenargs) - if cell.counter >= 0: + cell = jit_getter(False, *greenargs) + if cell is None or cell.counter >= 0: return None return cell.entry_loop_token self.get_assembler_token = get_assembler_token Modified: pypy/branch/fast-forward/pypy/jit/tl/pypyjit.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tl/pypyjit.py (original) +++ pypy/branch/fast-forward/pypy/jit/tl/pypyjit.py Thu Sep 9 01:00:13 2010 @@ -37,6 +37,7 @@ set_opt_level(config, level='jit') config.objspace.allworkingmodules = False config.objspace.usemodules.pypyjit = True +config.objspace.usemodules.array = True config.objspace.usemodules._weakref = False config.objspace.usemodules._sre = False set_pypy_opt_level(config, level='jit') Modified: pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py Thu Sep 9 01:00:13 2010 @@ -1,38 +1,64 @@ -base = object +## base = object -class Number(base): - __slots__ = ('val', ) - def __init__(self, val=0): - self.val = val - - def __add__(self, other): - if not isinstance(other, int): - other = other.val - return Number(val=self.val + other) +## class Number(base): +## __slots__ = ('val', ) +## def __init__(self, val=0): +## self.val = val + +## def __add__(self, other): +## if not isinstance(other, int): +## other = other.val +## return Number(val=self.val + other) - def __cmp__(self, other): - val = self.val - if not isinstance(other, int): - other = other.val - return cmp(val, other) - - def __nonzero__(self): - return bool(self.val) - -def g(x, inc=2): - return x + inc - -def f(n, x, inc): - while x < n: - x = g(x, inc=1) - return x - -import time -#t1 = time.time() -#f(10000000, Number(), 1) -#t2 = 
time.time() -#print t2 - t1 -t1 = time.time() -f(10000000, 0, 1) -t2 = time.time() -print t2 - t1 +## def __cmp__(self, other): +## val = self.val +## if not isinstance(other, int): +## other = other.val +## return cmp(val, other) + +## def __nonzero__(self): +## return bool(self.val) + +## def g(x, inc=2): +## return x + inc + +## def f(n, x, inc): +## while x < n: +## x = g(x, inc=1) +## return x + +## import time +## #t1 = time.time() +## #f(10000000, Number(), 1) +## #t2 = time.time() +## #print t2 - t1 +## t1 = time.time() +## f(10000000, 0, 1) +## t2 = time.time() +## print t2 - t1 + +try: + from array import array + def f(img): + i=0 + sa=0 + while i < img.__len__(): + sa+=img[i] + i+=1 + return sa + + img=array('h',(1,2,3,4)) + print f(img) +except Exception, e: + print "Exception: ", type(e) + print e + +## def f(): +## a=7 +## i=0 +## while i<4: +## if i<0: break +## if i<0: break +## i+=1 + +## f() Modified: pypy/branch/fast-forward/pypy/jit/tool/test/test_traceviewer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tool/test/test_traceviewer.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/test/test_traceviewer.py Thu Sep 9 01:00:13 2010 @@ -52,10 +52,10 @@ def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP')", None)] - postprocess(real_loops, real_loops[:]) + postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') - main(str(fname), view=False) + main(str(fname), False, view=False) # assert did not explode Modified: pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py Thu Sep 9 01:00:13 2010 @@ -1,11 +1,12 @@ #!/usr/bin/env python -""" Usage: traceviewer.py loopfile +""" Usage: traceviewer.py [--use-threshold] loopfile """ import optparse import sys import re import math +import py import autopath from pypy.translator.tool.graphpage import GraphPage @@ -40,13 +41,13 @@ self.source = dotgen.generate(target=None) class Page(GraphPage): - def compute(self, graphs): + def compute(self, graphs, counts): dotgen = DotGen('trace') self.loops = graphs self.links = {} self.cache = {} for loop in self.loops: - loop.generate(dotgen) + loop.generate(dotgen, counts) loop.getlinks(self.links) self.cache["loop" + str(loop.no)] = loop self.source = dotgen.generate(target=None) @@ -71,9 +72,14 @@ def getlinks(self, links): links[self.linksource] = self.name() - def generate(self, dotgen): + def generate(self, dotgen, counts): + val = counts.get(self.key, 0) + if val > counts.threshold: + fillcolor = get_gradient_color(self.ratio) + else: + fillcolor = "white" dotgen.emit_node(self.name(), label=self.header, - shape='box', fillcolor=get_gradient_color(self.ratio)) + shape='box', fillcolor=fillcolor) def get_content(self): return self._content @@ -113,11 +119,11 @@ self.target = target BasicBlock.__init__(self, content) - def postprocess(self, loops, memo): - postprocess_loop(self.target, loops, memo) + def postprocess(self, loops, memo, counts): + postprocess_loop(self.target, loops, memo, counts) - def generate(self, dotgen): - BasicBlock.generate(self, dotgen) + def generate(self, dotgen, 
counts): + BasicBlock.generate(self, dotgen, counts) if self.target is not None: dotgen.emit_edge(self.name(), self.target.name()) @@ -127,12 +133,12 @@ self.right = right BasicBlock.__init__(self, content) - def postprocess(self, loops, memo): - postprocess_loop(self.left, loops, memo) - postprocess_loop(self.right, loops, memo) + def postprocess(self, loops, memo, counts): + postprocess_loop(self.left, loops, memo, counts) + postprocess_loop(self.right, loops, memo, counts) - def generate(self, dotgen): - BasicBlock.generate(self, dotgen) + def generate(self, dotgen, counts): + BasicBlock.generate(self, dotgen, counts) dotgen.emit_edge(self.name(), self.left.name()) dotgen.emit_edge(self.name(), self.right.name()) @@ -176,13 +182,11 @@ real_loops = [] counter = 1 bar = progressbar.ProgressBar(color='blue') - single_percent = len(loops) / 100 allloops = [] - for i, loop in enumerate(loops): + for i, loop in enumerate(loops): if i > MAX_LOOPS: return real_loops, allloops - if single_percent and i % single_percent == 0: - bar.render(i / single_percent) + bar.render((i * 100) / len(loops)) firstline = loop[:loop.find("\n")] m = re.match('# Loop (\d+)', firstline) if m: @@ -202,17 +206,19 @@ counter += loop.count("\n") + 2 return real_loops, allloops -def postprocess_loop(loop, loops, memo): +def postprocess_loop(loop, loops, memo, counts): if loop in memo: return memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\(' (.*?)'", loop.content) + m = re.search("debug_merge_point\('( (.*?))'", loop.content) if m is None: name = '?' + loop.key = '?' else: - name = m.group(1) + " " + m.group(2) + name = m.group(2) + " " + m.group(3) + loop.key = m.group(1) opsno = loop.content.count("\n") lastline = loop.content[loop.content.rfind("\n", 0, len(loop.content) - 2):] m = re.search('descr= 20 and options.use_threshold: + counts.threshold = l[-20] + else: + counts.threshold = 0 + for_print = [(v, k) for k, v in counts.iteritems()] + for_print.sort() + else: + counts = {} log = logparser.parse_log_file(loopfile) loops = logparser.extract_category(log, "jit-log-opt-") real_loops, allloops = splitloops(loops) - postprocess(real_loops, allloops) + postprocess(real_loops, allloops, counts) if view: - Page(allloops).display() + Page(allloops, counts).display() if __name__ == '__main__': parser = optparse.OptionParser(usage=__doc__) + parser.add_option('--use-threshold', dest='use_threshold', + action="store_true") options, args = parser.parse_args(sys.argv) if len(args) != 2: print __doc__ sys.exit(1) - main(args[1]) + main(args[1], options.use_threshold) Modified: pypy/branch/fast-forward/pypy/module/__builtin__/compiling.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/compiling.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/compiling.py Thu Sep 9 01:00:13 2010 @@ -38,7 +38,7 @@ str_ = space.str_w(w_source) ec = space.getexecutioncontext() - if flags & ~(ec.compiler.compiler_flags | consts.PyCF_AST_ONLY | + if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) @@ -53,7 +53,7 @@ "or 'eval' or 'single'")) if ast_node is None: - if flags & consts.PyCF_AST_ONLY: + if flags & consts.PyCF_ONLY_AST: mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) return space.wrap(mod) else: Modified: 
pypy/branch/fast-forward/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/functional.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/functional.py Thu Sep 9 01:00:13 2010 @@ -221,8 +221,7 @@ from pypy.rlib.jit import JitDriver mapjitdriver = JitDriver(greens = ['code'], - reds = ['w_func', 'w_iter', 'result_w'], - can_inline = lambda *args: False) + reds = ['w_func', 'w_iter', 'result_w']) def map_single_user_function(code, w_func, w_iter): result_w = [] while True: Modified: pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py Thu Sep 9 01:00:13 2010 @@ -201,7 +201,7 @@ w_inst = W_InstanceObjectWithDel(space, self) else: w_inst = W_InstanceObject(space, self) - w_init = w_inst.getattr(space, space.wrap('__init__'), False) + w_init = w_inst.getattr_from_class(space, space.wrap('__init__')) if w_init is not None: w_result = space.call_args(w_init, __args__) if not space.is_w(w_result, space.w_None): @@ -337,25 +337,44 @@ space.wrap("__class__ must be set to a class")) self.w_class = w_class - - def getattr(self, space, w_name, exc=True): - w_result = space.finditem(self.w_dict, w_name) - if w_result is not None: - return w_result + def getattr_from_class(self, space, w_name): + # Look up w_name in the class dict, and call its __get__. + # This method ignores the instance dict and the __getattr__. + # Returns None if not found. w_value = self.w_class.lookup(space, w_name) if w_value is None: - if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, space.str_w(w_name)) - else: - return None + return None w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: return w_value return space.call_function(w_descr_get, w_value, self, self.w_class) + def getattr(self, space, w_name, exc=True): + # Normal getattr rules: look up w_name in the instance dict, + # in the class dict, and then via a call to __getatttr__. 
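# Illustrative sketch (not part of the patch; hypothetical names only): the
# three-step lookup that the rewritten getattr() below performs for old-style
# instances -- instance __dict__ first, then the class (via
# getattr_from_class), then the __getattr__ fallback.
class ToyInstance:
    class_value = 123
    def __getattr__(self, name):
        return 'fallback:' + name

def toy_lookup(obj, name):
    if name in obj.__dict__:                  # 1. instance dict
        return obj.__dict__[name]
    try:
        return getattr(obj.__class__, name)   # 2. class (descriptors aside)
    except AttributeError:
        return obj.__getattr__(name)          # 3. __getattr__ hook

inst = ToyInstance()
inst.__dict__['x'] = 42
assert toy_lookup(inst, 'x') == 42
assert toy_lookup(inst, 'class_value') == 123
assert toy_lookup(inst, 'missing') == 'fallback:missing'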
+ w_result = space.finditem(self.w_dict, w_name) + if w_result is not None: + return w_result + w_result = self.getattr_from_class(space, w_name) + if w_result is not None: + return w_result + w_meth = self.getattr_from_class(space, space.wrap('__getattr__')) + if w_meth is not None: + try: + return space.call_function(w_meth, w_name) + except OperationError, e: + if not exc and e.match(space, space.w_AttributeError): + return None # eat the AttributeError + raise + # not found at all + if exc: + raise operationerrfmt( + space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, space.str_w(w_name)) + else: + return None + def descr_getattribute(self, space, w_attr): name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': @@ -363,19 +382,11 @@ return self.w_dict elif name == "__class__": return self.w_class - try: - return self.getattr(space, w_attr) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - w_meth = self.getattr(space, space.wrap('__getattr__'), False) - if w_meth is not None: - return space.call_function(w_meth, w_attr) - raise + return self.getattr(space, w_attr) def descr_setattr(self, space, w_name, w_value): name = unwrap_attr(space, w_name) - w_meth = self.getattr(space, space.wrap('__setattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__setattr__')) if name and name[0] == "_": if name == '__dict__': self.setdict(space, w_value) @@ -405,7 +416,7 @@ # use setclass to raise the error self.setclass(space, None) return - w_meth = self.getattr(space, space.wrap('__delattr__'), False) + w_meth = self.getattr_from_class(space, space.wrap('__delattr__')) if w_meth is not None: space.call_function(w_meth, w_name) else: @@ -658,7 +669,10 @@ def descr_del(self, space): # Note that this is called from executioncontext.UserDelAction # via the space.userdel() method. 
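# Illustrative sketch (not part of the patch; 'Recorder' and 'special_lookup'
# are invented names): why descr_setattr()/descr_delattr() above now fetch
# '__setattr__'/'__delattr__' through getattr_from_class() -- special-method
# dispatch ignores the instance __dict__ and the __getattr__ fallback (see
# test_special_method_via_getattr further down).
def special_lookup(obj, name):
    klass = obj.__class__
    if name in klass.__dict__:       # class dict only, never obj.__dict__
        return klass.__dict__[name]
    return None

class Recorder(object):
    def __setattr__(self, name, value):
        object.__setattr__(self, 'last_set', (name, value))

r = Recorder()
r.__dict__['__setattr__'] = 'stale instance-dict entry, ignored'
special_lookup(r, '__setattr__')(r, 'x', 1)     # class version is used
assert r.last_set == ('x', 1)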
- w_func = self.getattr(space, space.wrap('__del__'), False) + w_name = space.wrap('__del__') + w_func = space.finditem(self.w_dict, w_name) + if w_func is None: + w_func = self.getattr_from_class(space, w_name) if w_func is not None: space.call_function(w_func) Modified: pypy/branch/fast-forward/pypy/module/__builtin__/test/test_buffer.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/test/test_buffer.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/test/test_buffer.py Thu Sep 9 01:00:13 2010 @@ -1,8 +1,11 @@ """Tests some behaviour of the buffer type that is not tested in lib-python/2.5.2/test/test_types.py where the stdlib buffer tests live.""" import autopath +from pypy.conftest import gettestobjspace class AppTestBuffer: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('array',)) def test_unicode_buffer(self): import sys Modified: pypy/branch/fast-forward/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/test/test_classobj.py Thu Sep 9 01:00:13 2010 @@ -786,6 +786,168 @@ return [1, 2] assert reversed(X()) == [1, 2] + def test_special_method_via_getattr(self): + class A: + def __getattr__(self, attr): + print 'A getattr:', attr + def callable(*args): + print 'A called:', attr + repr(args) + return attr + repr(args) + return callable + class B: + def __getattr__(self, attr): + print 'B getattr:', attr + def callable(*args): + print 'B called:', attr, args + self.called = attr, args + if attr == '__coerce__': + return self, args[0] + return 42 + return callable + a = A() + a.instancevalue = 42 # does not go via __getattr__('__setattr__') + a.__getattr__ = "hi there, ignore me, I'm in a" + a.__setattr__ = "hi there, ignore me, I'm in a too" + assert a.instancevalue == 42 + A.classvalue = 123 + assert a.classvalue == 123 + assert a.foobar(5) == 'foobar(5,)' + assert a.__dict__ == {'instancevalue': 42, + '__getattr__': a.__getattr__, + '__setattr__': a.__setattr__} + assert a.__class__ is A + # This follows the Python 2.5 rules, more precisely. + # It is still valid in Python 2.7 too. + assert repr(a) == '__repr__()' + assert str(a) == '__str__()' + assert unicode(a) == u'__unicode__()' + b = B() + b.__getattr__ = "hi there, ignore me, I'm in b" + b.__setattr__ = "hi there, ignore me, I'm in b too" + assert 'called' not in b.__dict__ # and not e.g. ('__init__', ()) + assert len(b) == 42 + assert b.called == ('__len__', ()) + assert a[5] == '__getitem__(5,)' + b[6] = 7 + assert b.called == ('__setitem__', (6, 7)) + del b[8] + assert b.called == ('__delitem__', (8,)) + # + class C: + def __getattr__(self, name): + if name == '__iter__': + return lambda: iter([3, 33, 333]) + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + class C: + def __getattr__(self, name): + if name == '__getitem__': + return lambda n: [3, 33, 333][n] + raise AttributeError + assert list(iter(C())) == [3, 33, 333] + # + assert a[:6] == '__getslice__(0, 6)' + b[3:5] = 7 + assert b.called == ('__setslice__', (3, 5, 7)) + del b[:-1000] + assert b.called == ('__delslice__', (0, -958)) # adds len(b)... 
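# Side note on the (0, -958) checked just above (illustrative helper, not part
# of the patch): the old __*slice__ protocol adds len(obj) to a negative bound
# exactly once, so the -1000 in "del b[:-1000]" becomes -1000 + 42 = -958 for
# an object whose __len__ reports 42.
def adjust_slice_bound(bound, length):
    if bound < 0:
        bound += length
    return bound

assert adjust_slice_bound(-1000, 42) == -958
assert adjust_slice_bound(3, 42) == 3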
+ assert a(5) == '__call__(5,)' + raises(TypeError, bool, a) # "should return an int" + assert not not b + # + class C: + def __getattr__(self, name): + if name == '__nonzero__': + return lambda: False + raise AttributeError + assert not C() + # + class C: + def __getattr__(self, name): + if name == '__len__': + return lambda: 0 + raise AttributeError + assert not C() + # + #assert cmp(b, 43) == 0 # because __eq__(43) returns 42, so True... + # ... I will leave this case as XXX implement me + assert hash(b) == 42 + assert range(100, 200)[b] == 142 + assert "foo" in b + # + class C: + def __iter__(self): + return self + def __getattr__(self, name): + if name == 'next': + return lambda: 'the next item' + raise AttributeError + for x in C(): + assert x == 'the next item' + break + # + # XXX a really corner case: '__del__' + # + import operator + op_by_name = {"neg": operator.neg, + "pos": operator.pos, + "abs": abs, + "invert": operator.invert, + "int": int, + "long": long} + for opname, opfunc in op_by_name.items(): + assert opfunc(b) == 42 + assert b.called == ("__" + opname + "__", ()) + assert oct(a) == '__oct__()' + assert hex(a) == '__hex__()' + # + class C: + def __getattr__(self, name): + return lambda: 5.5 + raises(TypeError, float, b) + assert float(C()) == 5.5 + # + op_by_name = {'eq': operator.eq, + 'ne': operator.ne, + 'gt': operator.gt, + 'lt': operator.lt, + 'ge': operator.ge, + 'le': operator.le, + 'imod': operator.imod, + 'iand': operator.iand, + 'ipow': operator.ipow, + 'itruediv': operator.itruediv, + 'ilshift': operator.ilshift, + 'ixor': operator.ixor, + 'irshift': operator.irshift, + 'ifloordiv': operator.ifloordiv, + 'idiv': operator.idiv, + 'isub': operator.isub, + 'imul': operator.imul, + 'iadd': operator.iadd, + 'ior': operator.ior, + 'or': operator.or_, + 'and': operator.and_, + 'xor': operator.xor, + 'lshift': operator.lshift, + 'rshift': operator.rshift, + 'add': operator.add, + 'sub': operator.sub, + 'mul': operator.mul, + 'div': operator.div, + 'mod': operator.mod, + 'divmod': divmod, + 'floordiv': operator.floordiv, + 'truediv': operator.truediv} + for opname, opfunc in op_by_name.items(): + assert opfunc(b, 5) == 42 + assert b.called == ("__" + opname + "__", (5,)) + x, y = coerce(b, 5) + assert x is b + assert y == 5 + + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withsharingdict": True}) Modified: pypy/branch/fast-forward/pypy/module/_ast/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ast/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_ast/__init__.py Thu Sep 9 01:00:13 2010 @@ -5,7 +5,7 @@ class Module(MixedModule): interpleveldefs = { - "PyCF_AST_ONLY" : "space.wrap(%s)" % consts.PyCF_AST_ONLY + "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST } appleveldefs = {} Modified: pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py (original) +++ pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py Thu Sep 9 01:00:13 2010 @@ -10,7 +10,7 @@ cls.w_get_ast = cls.space.appexec([], """(): def get_ast(source, mode="exec"): import _ast as ast - mod = compile(source, "", mode, ast.PyCF_AST_ONLY) + mod = compile(source, "", mode, ast.PyCF_ONLY_AST) assert isinstance(mod, ast.mod) return mod return get_ast""") Modified: 
pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py (original) +++ pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py Thu Sep 9 01:00:13 2010 @@ -123,6 +123,10 @@ class AppTestPartialEvaluation: + def setup_class(cls): + space = gettestobjspace(usemodules=('array',)) + cls.space = space + def test_partial_utf8(self): import _codecs encoding = 'utf-8' Modified: pypy/branch/fast-forward/pypy/module/_demo/demo.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_demo/demo.py (original) +++ pypy/branch/fast-forward/pypy/module/_demo/demo.py Thu Sep 9 01:00:13 2010 @@ -4,11 +4,14 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.tool import rffi_platform +from pypy.translator.tool.cbuild import ExternalCompilationInfo import sys, math time_t = rffi_platform.getsimpletype('time_t', '#include ', rffi.LONG) -time = rffi.llexternal('time', [rffi.VOIDP], time_t, includes=['time.h']) +eci = ExternalCompilationInfo(includes=['time.h']) +time = rffi.llexternal('time', [int], time_t, + compilation_info=eci) def get(space, name): w_module = space.getbuiltinmodule('_demo') @@ -20,10 +23,10 @@ w_DemoError = get(space, 'DemoError') msg = "repetition count must be > 0" raise OperationError(w_DemoError, space.wrap(msg)) - starttime = time(None) + starttime = time(0) for i in range(repetitions): space.call_function(w_callable) - endtime = time(None) + endtime = time(0) return space.wrap(endtime - starttime) measuretime.unwrap_spec = [ObjSpace, int, W_Root] @@ -62,11 +65,16 @@ self.x = space.int_w(w_value) def mytype_new(space, w_subtype, x): + if x == 3: + return space.wrap(MySubType(space, x)) return space.wrap(W_MyType(space, x)) mytype_new.unwrap_spec = [ObjSpace, W_Root, int] getset_x = GetSetProperty(W_MyType.fget_x, W_MyType.fset_x, cls=W_MyType) +class MySubType(W_MyType): + pass + W_MyType.typedef = TypeDef('MyType', __new__ = interp2app(mytype_new), x = getset_x, Modified: pypy/branch/fast-forward/pypy/module/_file/interp_file.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_file/interp_file.py (original) +++ pypy/branch/fast-forward/pypy/module/_file/interp_file.py Thu Sep 9 01:00:13 2010 @@ -4,6 +4,7 @@ from pypy.rlib.rarithmetic import r_longlong from pypy.module._file.interp_stream import W_AbstractStream from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror +from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, W_Root, Arguments from pypy.interpreter.typedef import TypeDef, GetSetProperty @@ -81,11 +82,11 @@ # file lock. They don't convert StreamErrors to OperationErrors, too. 
def direct___init__(self, w_name, mode='r', buffering=-1): - name = self.space.str_w(w_name) self.direct_close() self.w_name = w_name self.check_mode_ok(mode) - stream = streamio.open_file_as_stream(name, mode, buffering) + stream = dispatch_filename(streamio.open_file_as_stream)( + self.space, w_name, mode, buffering) fd = stream.try_to_find_file_descriptor() self.fdopenstream(stream, fd, mode) Modified: pypy/branch/fast-forward/pypy/module/_file/test/test_file.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_file/test/test_file.py (original) +++ pypy/branch/fast-forward/pypy/module/_file/test/test_file.py Thu Sep 9 01:00:13 2010 @@ -125,6 +125,15 @@ assert type(res) is str f.close() + def test_unicode_filename(self): + import sys + try: + u'\xe9'.encode(sys.getfilesystemencoding()) + except UnicodeEncodeError: + skip("encoding not good enough") + f = self.file(self.temppath + u'\xe9', "w") + f.close() + def test_oserror_has_filename(self): try: f = self.file("file that is clearly not there") Modified: pypy/branch/fast-forward/pypy/module/_file/test/test_file_extra.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_file/test/test_file_extra.py (original) +++ pypy/branch/fast-forward/pypy/module/_file/test/test_file_extra.py Thu Sep 9 01:00:13 2010 @@ -353,6 +353,10 @@ class AppTestAFewExtra: + def setup_class(cls): + space = gettestobjspace(usemodules=('array',)) + cls.space = space + def setup_method(self, method): fn = str(udir.join('temptestfile')) self.w_temptestfile = self.space.wrap(fn) Modified: pypy/branch/fast-forward/pypy/module/_locale/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_locale/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_locale/__init__.py Thu Sep 9 01:00:13 2010 @@ -1,5 +1,4 @@ from pypy.interpreter.mixedmodule import MixedModule -from pypy.module._locale import interp_locale from pypy.rlib import rlocale import sys Modified: pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py Thu Sep 9 01:00:13 2010 @@ -677,7 +677,12 @@ a = A(1) a[0] = -1234 a.free() - + + def test_long_with_fromaddress(self): + import _rawffi + addr = -1 + raises(ValueError, _rawffi.Array('u').fromaddress, addr, 100) + def test_passing_raw_pointers(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py Thu Sep 9 01:00:13 2010 @@ -74,7 +74,11 @@ This is like connect(address), but returns an error code (the errno value) instead of raising an exception when an error occurs. 
""" - error = self.connect_ex(self.addr_from_object(space, w_addr)) + try: + addr = self.addr_from_object(space, w_addr) + except SocketError, e: + raise converted_error(space, e) + error = self.connect_ex(addr) return space.wrap(error) connect_ex_w.unwrap_spec = ['self', ObjSpace, W_Root] Modified: pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py Thu Sep 9 01:00:13 2010 @@ -4,7 +4,7 @@ from pypy.tool.udir import udir def setup_module(mod): - mod.space = gettestobjspace(usemodules=['_socket']) + mod.space = gettestobjspace(usemodules=['_socket', 'array']) global socket import socket mod.w_socket = space.appexec([], "(): import _socket as m; return m") @@ -221,6 +221,21 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info +def test_unknown_addr_as_object(): + from pypy.rlib import rsocket + from pypy.rpython.lltypesystem import lltype, rffi + + c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') + c_addr.c_sa_data[0] = 'c' + rffi.setintfield(c_addr, 'c_sa_family', 15) + # XXX what size to pass here? for the purpose of this test it has + # to be short enough so we have some data, 1 sounds good enough + # + sizeof USHORT + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + assert space.is_true(space.isinstance(w_obj, space.w_tuple)) + assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 + assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' + def test_getnameinfo(): host = "127.0.0.1" port = 25 @@ -339,6 +354,13 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() + + def test_socket_connect_ex(self): + import _socket + s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) + # Make sure we get an app-level error, not an interp one. 
+ raises(_socket.gaierror, s.connect_ex, ("wrong.invalid", 80)) + s.close() def test_socket_connect_typeerrors(self): tests = [ @@ -433,7 +455,6 @@ s2 = s.dup() assert s.fileno() != s2.fileno() assert s.getsockname() == s2.getsockname() - def test_buffer_or_unicode(self): # Test that send/sendall/sendto accept a buffer or a unicode as arg Modified: pypy/branch/fast-forward/pypy/module/_sre/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_sre/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_sre/__init__.py Thu Sep 9 01:00:13 2010 @@ -1,24 +1,14 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): - """A pure Python reimplementation of the _sre module from CPython 2.4 -Copyright 2005 Nik Haldimann, licensed under the MIT license -This code is based on material licensed under CNRI's Python 1.6 license and -copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB -""" - appleveldefs = { - 'compile': 'app_sre.compile', } interpleveldefs = { 'CODESIZE': 'space.wrap(interp_sre.CODESIZE)', 'MAGIC': 'space.wrap(interp_sre.MAGIC)', - 'copyright': 'space.wrap(interp_sre.copyright)', + 'compile': 'interp_sre.W_SRE_Pattern', 'getlower': 'interp_sre.w_getlower', 'getcodesize': 'interp_sre.w_getcodesize', - '_State': 'interp_sre.make_state', - '_match': 'interp_sre.w_match', - '_search': 'interp_sre.w_search', } Modified: pypy/branch/fast-forward/pypy/module/_sre/interp_sre.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_sre/interp_sre.py (original) +++ pypy/branch/fast-forward/pypy/module/_sre/interp_sre.py Thu Sep 9 01:00:13 2010 @@ -1,27 +1,20 @@ +import sys from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.typedef import GetSetProperty, TypeDef from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w +from pypy.interpreter.typedef import make_weakref_descr from pypy.interpreter.gateway import interp2app, ObjSpace, W_Root from pypy.interpreter.error import OperationError from pypy.rlib.rarithmetic import intmask +from pypy.tool.pairtype import extendabletype -# This can be compiled in two ways: -# -# * THREE_VERSIONS_OF_CORE=True: you get three copies of the whole -# regexp searching and matching code: for strings, for unicode strings, -# and for generic buffer objects (like mmap.mmap or array.array). -# -# * THREE_VERSIONS_OF_CORE=False: there is only one copy of the code, -# at the cost of an indirect method call to fetch each character. 
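# Toy illustration (invented names, not the real module setup): the
# interpleveldefs change above binds _sre.compile directly to the pattern
# class, so "compiling" is just instantiating it; the real constructor is
# SRE_Pattern__new__ further below.
class ToyPattern(object):
    def __init__(self, pattern, flags, code):
        self.pattern = pattern
        self.flags = flags
        self.code = list(code)

toy_compile = ToyPattern      # _sre exposes this kind of binding as 'compile'
p = toy_compile('ab', 0, [17, 8, 3])
assert isinstance(p, ToyPattern) and p.pattern == 'ab'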
- -THREE_VERSIONS_OF_CORE = True +# ____________________________________________________________ +# +# Constants and exposed functions -#### Constants and exposed functions - -from pypy.rlib.rsre import rsre -from pypy.rlib.rsre.rsre_char import MAGIC, CODESIZE, getlower -copyright = "_sre.py 2.4 Copyright 2005 by Nik Haldimann" +from pypy.rlib.rsre import rsre_core +from pypy.rlib.rsre.rsre_char import MAGIC, CODESIZE, getlower, set_unicode_db def w_getlower(space, char_ord, flags): return space.wrap(getlower(char_ord, flags)) @@ -31,166 +24,529 @@ return space.wrap(CODESIZE) # use the same version of unicodedb as the standard objspace -from pypy.objspace.std.unicodeobject import unicodedb -rsre.set_unicode_db(unicodedb) +import pypy.objspace.std.unicodeobject +set_unicode_db(pypy.objspace.std.unicodeobject.unicodedb) -#### State classes +# ____________________________________________________________ +# +# Additional methods on the classes XxxMatchContext + +class __extend__(rsre_core.AbstractMatchContext): + __metaclass__ = extendabletype + def _w_slice(self, space, start, end): + raise NotImplementedError + def _w_string(self, space): + raise NotImplementedError -def make_state(space, w_string, start, end, flags): - # XXX maybe turn this into a __new__ method of W_State - if space.is_true(space.isinstance(w_string, space.w_str)): - cls = W_StringState - elif space.is_true(space.isinstance(w_string, space.w_unicode)): - cls = W_UnicodeState - else: - cls = W_GenericState - return space.wrap(cls(space, w_string, start, end, flags)) -make_state.unwrap_spec = [ObjSpace, W_Root, int, int, int] - - -class W_State(Wrappable): - if not THREE_VERSIONS_OF_CORE: - rsre.insert_sre_methods(locals(), 'all') - - def __init__(self, space, w_string, start, end, flags): - self.space = space - self.w_string = w_string - length = self.unwrap_object() - if start < 0: - start = 0 - if end > length: - end = length - self.start = start - self.pos = start # records the original start position - self.end = end - self.flags = flags - self.reset() +class __extend__(rsre_core.StrMatchContext): + __metaclass__ = extendabletype + def _w_slice(self, space, start, end): + return space.wrap(self._string[start:end]) + def _w_string(self, space): + return space.wrap(self._string) + +class __extend__(rsre_core.UnicodeMatchContext): + __metaclass__ = extendabletype + def _w_slice(self, space, start, end): + return space.wrap(self._unicodestr[start:end]) + def _w_string(self, space): + return space.wrap(self._unicodestr) + +def slice_w(space, ctx, start, end, w_default): + if 0 <= start <= end: + return ctx._w_slice(space, start, end) + return w_default + +def do_flatten_marks(ctx, num_groups): + # Returns a list of RPython-level integers. + # Unlike the app-level groups() method, groups are numbered from 0 + # and the returned list does not start with the whole match range. 
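# Toy model ('ToyMark' is invented, not the real rsre mark object): what the
# do_flatten_marks() helper below computes from the linked list of marks -- a
# flat [start0, end0, start1, end1, ...] list in which the newest value for
# each slot wins, because the walk starts from the most recent mark.
class ToyMark(object):
    def __init__(self, gid, position, prev=None):
        self.gid = gid
        self.position = position
        self.prev = prev

def toy_flatten(last_mark, num_groups):
    result = [-1] * (2 * num_groups)
    mark = last_mark
    while mark is not None:
        if result[mark.gid] == -1:    # keep only the newest value per slot
            result[mark.gid] = mark.position
        mark = mark.prev
    return result

# one group spanning positions 2..5: gid 0 holds the start, gid 1 the end
marks = ToyMark(1, 5, ToyMark(0, 2))
assert toy_flatten(marks, 1) == [2, 5]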
+ if num_groups == 0: + return None + result = [-1] * (2*num_groups) + mark = ctx.match_marks + while mark is not None: + index = mark.gid + if result[index] == -1: + result[index] = mark.position + mark = mark.prev + return result + +def allgroups_w(space, ctx, fmarks, num_groups, w_default): + grps = [slice_w(space, ctx, fmarks[i*2], fmarks[i*2+1], w_default) + for i in range(num_groups)] + return space.newtuple(grps) + +def import_re(space): + w_builtin = space.getbuiltinmodule('__builtin__') + w_import = space.getattr(w_builtin, space.wrap("__import__")) + return space.call_function(w_import, space.wrap("re")) - def lower(self, char_ord): - return getlower(char_ord, self.flags) +def matchcontext(space, ctx): + try: + return rsre_core.match_context(ctx) + except rsre_core.Error, e: + raise OperationError(space.w_RuntimeError, space.wrap(e.msg)) - # methods overridden by subclasses +def searchcontext(space, ctx): + try: + return rsre_core.search_context(ctx) + except rsre_core.Error, e: + raise OperationError(space.w_RuntimeError, space.wrap(e.msg)) - def unwrap_object(self): - raise NotImplementedError +# ____________________________________________________________ +# +# SRE_Pattern class - if 'reset' not in locals(): - def reset(self): - raise NotImplementedError - - if 'search' not in locals(): - def search(self, pattern_codes): - raise NotImplementedError - - if 'match' not in locals(): - def match(self, pattern_codes): - raise NotImplementedError - - # Accessors for the typedef - - def w_reset(self): - self.reset() - - def create_regs(self, group_count): - """ Purely abstract method - """ - raise NotImplementedError +class W_SRE_Pattern(Wrappable): - def w_create_regs(self, group_count): - """Creates a tuple of index pairs representing matched groups, a format - that's convenient for SRE_Match.""" + def cannot_copy_w(self): space = self.space - return space.newtuple([ - space.newtuple([space.wrap(value1), - space.wrap(value2)]) - for value1, value2 in self.create_regs(group_count)]) - w_create_regs.unwrap_spec = ['self', int] + raise OperationError(space.w_TypeError, + space.wrap("cannot copy this pattern object")) - def fget_start(space, self): - return space.wrap(self.start) - - def fset_start(space, self, w_value): - self.start = space.int_w(w_value) + def make_ctx(self, w_string, pos=0, endpos=sys.maxint): + """Make a StrMatchContext or a UnicodeMatchContext for searching + in the given w_string object.""" + space = self.space + if pos < 0: pos = 0 + if endpos < pos: endpos = pos + if space.is_true(space.isinstance(w_string, space.w_unicode)): + unicodestr = space.unicode_w(w_string) + if pos > len(unicodestr): pos = len(unicodestr) + if endpos > len(unicodestr): endpos = len(unicodestr) + return rsre_core.UnicodeMatchContext(self.code, unicodestr, + pos, endpos, self.flags) + else: + str = space.bufferstr_w(w_string) + if pos > len(str): pos = len(str) + if endpos > len(str): endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) + + def getmatch(self, ctx, found): + if found: + return W_SRE_Match(self, ctx) + else: + return self.space.w_None + + def match_w(self, w_string, pos=0, endpos=sys.maxint): + ctx = self.make_ctx(w_string, pos, endpos) + return self.getmatch(ctx, matchcontext(self.space, ctx)) + match_w.unwrap_spec = ['self', W_Root, int, int] + + def search_w(self, w_string, pos=0, endpos=sys.maxint): + ctx = self.make_ctx(w_string, pos, endpos) + return self.getmatch(ctx, searchcontext(self.space, ctx)) + search_w.unwrap_spec 
= ['self', W_Root, int, int] - def fget_string_position(space, self): - return space.wrap(self.string_position) + def findall_w(self, w_string, pos=0, endpos=sys.maxint): + space = self.space + matchlist_w = [] + ctx = self.make_ctx(w_string, pos, endpos) + while ctx.match_start <= ctx.end: + if not searchcontext(space, ctx): + break + num_groups = self.num_groups + w_emptystr = space.wrap("") + if num_groups == 0: + w_item = slice_w(space, ctx, ctx.match_start, ctx.match_end, + w_emptystr) + else: + fmarks = do_flatten_marks(ctx, num_groups) + if num_groups == 1: + w_item = slice_w(space, ctx, fmarks[0], fmarks[1], + w_emptystr) + else: + w_item = allgroups_w(space, ctx, fmarks, num_groups, + w_emptystr) + matchlist_w.append(w_item) + no_progress = (ctx.match_start == ctx.match_end) + ctx.reset(ctx.match_end + no_progress) + return space.newlist(matchlist_w) + findall_w.unwrap_spec = ['self', W_Root, int, int] + + def finditer_w(self, w_string, pos=0, endpos=sys.maxint): + # this also works as the implementation of the undocumented + # scanner() method. + ctx = self.make_ctx(w_string, pos, endpos) + scanner = W_SRE_Scanner(self, ctx) + return self.space.wrap(scanner) + finditer_w.unwrap_spec = ['self', W_Root, int, int] - def fset_string_position(space, self, w_value): - self.start = space.int_w(w_value) + def split_w(self, w_string, maxsplit=0): + space = self.space + splitlist = [] + n = 0 + last = 0 + ctx = self.make_ctx(w_string) + while not maxsplit or n < maxsplit: + if not searchcontext(space, ctx): + break + if ctx.match_start == ctx.match_end: # zero-width match + if ctx.match_start == ctx.end: # or end of string + break + ctx.reset(ctx.match_end + 1) + continue + splitlist.append(slice_w(space, ctx, last, ctx.match_start, + space.w_None)) + # add groups (if any) + fmarks = do_flatten_marks(ctx, self.num_groups) + for groupnum in range(self.num_groups): + groupstart, groupend = fmarks[groupnum*2], fmarks[groupnum*2+1] + splitlist.append(slice_w(space, ctx, groupstart, groupend, + space.w_None)) + n += 1 + last = ctx.match_end + ctx.reset(last) + splitlist.append(slice_w(space, ctx, last, ctx.end, space.w_None)) + return space.newlist(splitlist) + split_w.unwrap_spec = ['self', W_Root, int] + + def sub_w(self, w_repl, w_string, count=0): + w_item, n = self.subx(w_repl, w_string, count) + return w_item + sub_w.unwrap_spec = ['self', W_Root, W_Root, int] - def get_char_ord(self, p): - raise NotImplementedError + def subn_w(self, w_repl, w_string, count=0): + w_item, n = self.subx(w_repl, w_string, count) + space = self.space + return space.newtuple([w_item, space.wrap(n)]) + subn_w.unwrap_spec = ['self', W_Root, W_Root, int] -getset_start = GetSetProperty(W_State.fget_start, W_State.fset_start, cls=W_State) -getset_string_position = GetSetProperty(W_State.fget_string_position, - W_State.fset_string_position, cls=W_State) - -W_State.typedef = TypeDef("W_State", - string = interp_attrproperty_w("w_string", W_State), - start = getset_start, - end = interp_attrproperty("end", W_State), - string_position = getset_string_position, - pos = interp_attrproperty("pos", W_State), - lastindex = interp_attrproperty("lastindex", W_State), - reset = interp2app(W_State.w_reset), - create_regs = interp2app(W_State.w_create_regs), + def subx(self, w_ptemplate, w_string, count): + space = self.space + if space.is_true(space.callable(w_ptemplate)): + w_filter = w_ptemplate + filter_is_callable = True + else: + if space.is_true(space.isinstance(w_ptemplate, space.w_unicode)): + filter_as_unicode = 
space.unicode_w(w_ptemplate) + literal = u'\\' not in filter_as_unicode + else: + try: + filter_as_string = space.str_w(w_ptemplate) + except OperationError, e: + if e.async(space): + raise + literal = False + else: + literal = '\\' not in filter_as_string + if literal: + w_filter = w_ptemplate + filter_is_callable = False + else: + # not a literal; hand it over to the template compiler + w_re = import_re(space) + w_filter = space.call_method(w_re, '_subx', + space.wrap(self), w_ptemplate) + filter_is_callable = space.is_true(space.callable(w_filter)) + # + ctx = self.make_ctx(w_string) + sublist_w = [] + n = last_pos = 0 + while not count or n < count: + if not searchcontext(space, ctx): + break + if last_pos < ctx.match_start: + sublist_w.append(slice_w(space, ctx, last_pos, + ctx.match_start, space.w_None)) + start = ctx.match_end + if start == ctx.match_start: + start += 1 + nextctx = ctx.fresh_copy(start) + if not (last_pos == ctx.match_start + == ctx.match_end and n > 0): + # the above ignores empty matches on latest position + if filter_is_callable: + w_match = self.getmatch(ctx, True) + w_piece = space.call_function(w_filter, w_match) + if not space.is_w(w_piece, space.w_None): + sublist_w.append(w_piece) + else: + sublist_w.append(w_filter) + last_pos = ctx.match_end + n += 1 + elif last_pos >= ctx.end: + break # empty match at the end: finished + ctx = nextctx + + if last_pos < ctx.end: + sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, + space.w_None)) + if n == 0: + # not just an optimization -- see test_sub_unicode + return w_string, n + + if space.is_true(space.isinstance(w_string, space.w_unicode)): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n + + +def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, + groups=0, w_groupindex=None, w_indexgroup=None): + n = space.int_w(space.len(w_code)) + code = [0] * n + for i in range(n): + x = space.uint_w(space.getitem(w_code, space.wrap(i))) + code[i] = intmask(x) + # + w_srepat = space.allocate_instance(W_SRE_Pattern, w_subtype) + srepat = space.interp_w(W_SRE_Pattern, w_srepat) + srepat.space = space + srepat.w_pattern = w_pattern # the original uncompiled pattern + srepat.flags = flags + srepat.code = code + srepat.num_groups = groups + srepat.w_groupindex = w_groupindex + srepat.w_indexgroup = w_indexgroup + return w_srepat +SRE_Pattern__new__.unwrap_spec = [ObjSpace, W_Root, W_Root, int, W_Root, + int, W_Root, W_Root] + + +W_SRE_Pattern.typedef = TypeDef( + 'SRE_Pattern', + __new__ = interp2app(SRE_Pattern__new__), + __copy__ = interp2app(W_SRE_Pattern.cannot_copy_w), + __deepcopy__ = interp2app(W_SRE_Pattern.cannot_copy_w), + __weakref__ = make_weakref_descr(W_SRE_Pattern), + findall = interp2app(W_SRE_Pattern.findall_w), + finditer = interp2app(W_SRE_Pattern.finditer_w), + match = interp2app(W_SRE_Pattern.match_w), + scanner = interp2app(W_SRE_Pattern.finditer_w), # reuse finditer() + search = interp2app(W_SRE_Pattern.search_w), + split = interp2app(W_SRE_Pattern.split_w), + sub = interp2app(W_SRE_Pattern.sub_w), + subn = interp2app(W_SRE_Pattern.subn_w), + flags = interp_attrproperty('flags', W_SRE_Pattern), + groupindex = interp_attrproperty_w('w_groupindex', W_SRE_Pattern), + groups = interp_attrproperty('num_groups', W_SRE_Pattern), + pattern = interp_attrproperty_w('w_pattern', W_SRE_Pattern), ) +# ____________________________________________________________ +# +# SRE_Match class -class 
W_StringState(W_State): - if THREE_VERSIONS_OF_CORE: - rsre.insert_sre_methods(locals(), 'str') - - def unwrap_object(self): - self.string = self.space.str_w(self.w_string) - return len(self.string) - - def get_char_ord(self, p): - return ord(self.string[p]) +class W_SRE_Match(Wrappable): + flatten_cache = None + def __init__(self, srepat, ctx): + self.space = srepat.space + self.srepat = srepat + self.ctx = ctx -class W_UnicodeState(W_State): - if THREE_VERSIONS_OF_CORE: - rsre.insert_sre_methods(locals(), 'unicode') + def cannot_copy_w(self): + space = self.space + raise OperationError(space.w_TypeError, + space.wrap("cannot copy this match object")) - def unwrap_object(self): - self.unicode = self.space.unicode_w(self.w_string) - return len(self.unicode) + def group_w(self, args_w): + space = self.space + ctx = self.ctx + if len(args_w) <= 1: + if len(args_w) == 0: + start, end = ctx.match_start, ctx.match_end + else: + start, end = self.do_span(args_w[0]) + return slice_w(space, ctx, start, end, space.w_None) + else: + results = [None] * len(args_w) + for i in range(len(args_w)): + start, end = self.do_span(args_w[i]) + results[i] = slice_w(space, ctx, start, end, space.w_None) + return space.newtuple(results) + group_w.unwrap_spec = ['self', 'args_w'] + + def groups_w(self, w_default=None): + fmarks = self.flatten_marks() + num_groups = self.srepat.num_groups + return allgroups_w(self.space, self.ctx, fmarks, num_groups, w_default) - def get_char_ord(self, p): - return ord(self.unicode[p]) + def groupdict_w(self, w_default=None): + space = self.space + w_dict = space.newdict() + w_groupindex = self.srepat.w_groupindex + w_iterator = space.iter(w_groupindex) + while True: + try: + w_key = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + w_value = space.getitem(w_groupindex, w_key) + start, end = self.do_span(w_value) + w_grp = slice_w(space, self.ctx, start, end, w_default) + space.setitem(w_dict, w_key, w_grp) + return w_dict + def expand_w(self, w_template): + space = self.space + w_re = import_re(space) + return space.call_method(w_re, '_expand', space.wrap(self.srepat), + space.wrap(self), w_template) + + def start_w(self, w_groupnum=0): + return self.space.wrap(self.do_span(w_groupnum)[0]) + + def end_w(self, w_groupnum=0): + return self.space.wrap(self.do_span(w_groupnum)[1]) + + def span_w(self, w_groupnum=0): + start, end = self.do_span(w_groupnum) + return self.space.newtuple([self.space.wrap(start), + self.space.wrap(end)]) + + def flatten_marks(self): + if self.flatten_cache is None: + num_groups = self.srepat.num_groups + self.flatten_cache = do_flatten_marks(self.ctx, num_groups) + return self.flatten_cache -class W_GenericState(W_State): - if THREE_VERSIONS_OF_CORE: - rsre.insert_sre_methods(locals(), 'generic') + def do_span(self, w_arg): + space = self.space + try: + groupnum = space.int_w(w_arg) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + groupnum = space.int_w(w_groupnum) + if groupnum == 0: + return self.ctx.match_start, self.ctx.match_end + elif 1 <= groupnum <= self.srepat.num_groups: + fmarks = self.flatten_marks() + idx = 2*(groupnum-1) + assert idx >= 0 + return fmarks[idx], fmarks[idx+1] + else: + raise OperationError(space.w_IndexError, + space.wrap("group index out of range")) + + def _last_index(self): + mark = self.ctx.match_marks + if mark is not None: + return mark.gid // 2 + 1 + 
return -1 + + def fget_lastgroup(space, self): + lastindex = self._last_index() + if lastindex < 0: + return space.w_None + w_result = space.finditem(self.srepat.w_indexgroup, + space.wrap(lastindex)) + if w_result is None: + return space.w_None + return w_result + + def fget_lastindex(space, self): + lastindex = self._last_index() + if lastindex >= 0: + return space.wrap(lastindex) + return space.w_None - def unwrap_object(self): - self.buffer = self.space.buffer_w(self.w_string) - return self.buffer.getlength() + def fget_pos(space, self): + return space.wrap(self.ctx.original_pos) - def get_char_ord(self, p): - return ord(self.buffer.getitem(p)) + def fget_endpos(space, self): + return space.wrap(self.ctx.end) + def fget_regs(space, self): + space = self.space + fmarks = self.flatten_marks() + num_groups = self.srepat.num_groups + result_w = [None] * (num_groups + 1) + ctx = self.ctx + result_w[0] = space.newtuple([space.wrap(ctx.match_start), + space.wrap(ctx.match_end)]) + for i in range(num_groups): + result_w[i + 1] = space.newtuple([space.wrap(fmarks[i*2]), + space.wrap(fmarks[i*2+1])]) + return space.newtuple(result_w) + + def fget_string(space, self): + return self.ctx._w_string(space) + + +W_SRE_Match.typedef = TypeDef( + 'SRE_Match', + __copy__ = interp2app(W_SRE_Match.cannot_copy_w), + __deepcopy__ = interp2app(W_SRE_Match.cannot_copy_w), + group = interp2app(W_SRE_Match.group_w), + groups = interp2app(W_SRE_Match.groups_w), + groupdict = interp2app(W_SRE_Match.groupdict_w), + start = interp2app(W_SRE_Match.start_w), + end = interp2app(W_SRE_Match.end_w), + span = interp2app(W_SRE_Match.span_w), + expand = interp2app(W_SRE_Match.expand_w), + # + re = interp_attrproperty('srepat', W_SRE_Match), + string = GetSetProperty(W_SRE_Match.fget_string), + pos = GetSetProperty(W_SRE_Match.fget_pos), + endpos = GetSetProperty(W_SRE_Match.fget_endpos), + lastgroup = GetSetProperty(W_SRE_Match.fget_lastgroup), + lastindex = GetSetProperty(W_SRE_Match.fget_lastindex), + regs = GetSetProperty(W_SRE_Match.fget_regs), +) -def w_search(space, w_state, w_pattern_codes): - state = space.interp_w(W_State, w_state) - pattern_codes = [intmask(space.uint_w(code)) for code - in space.unpackiterable(w_pattern_codes)] - try: - res = state.search(pattern_codes) - except RuntimeError: - raise OperationError(space.w_RuntimeError, - space.wrap("Internal re error")) - return space.newbool(res) - -def w_match(space, w_state, w_pattern_codes): - state = space.interp_w(W_State, w_state) - pattern_codes = [intmask(space.uint_w(code)) for code - in space.unpackiterable(w_pattern_codes)] - try: - res = state.match(pattern_codes) - except RuntimeError: - raise OperationError(space.w_RuntimeError, - space.wrap("Internal re error")) - return space.newbool(res) +# ____________________________________________________________ +# +# SRE_Scanner class +# This is mostly an internal class in CPython. +# Our version is also directly iterable, to make finditer() easier. + +class W_SRE_Scanner(Wrappable): + + def __init__(self, pattern, ctx): + self.space = pattern.space + self.srepat = pattern + self.ctx = ctx + # 'self.ctx' is always a fresh context in which no searching + # or matching succeeded so far. 
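# Restating the progress rule used by getmatch() below and by findall_w()
# above ('toy_next_start' is an invented helper for illustration only): the
# next scan resumes at match_end, or one position later when the match was
# zero-width, so iteration always terminates.
def toy_next_start(match_start, match_end):
    nextstart = match_end
    nextstart += (match_start == nextstart)   # empty match: force progress
    return nextstart

assert toy_next_start(3, 7) == 7   # normal match: continue where it ended
assert toy_next_start(4, 4) == 5   # zero-width match: skip ahead by one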
+ + def iter_w(self): + return self.space.wrap(self) + + def next_w(self): + if self.ctx.match_start > self.ctx.end: + raise OperationError(self.space.w_StopIteration, self.space.w_None) + if not searchcontext(self.space, self.ctx): + raise OperationError(self.space.w_StopIteration, self.space.w_None) + return self.getmatch(True) + + def match_w(self): + if self.ctx.match_start > self.ctx.end: + return self.space.w_None + return self.getmatch(matchcontext(self.space, self.ctx)) + + def search_w(self): + if self.ctx.match_start > self.ctx.end: + return self.space.w_None + return self.getmatch(searchcontext(self.space, self.ctx)) + + def getmatch(self, found): + if found: + ctx = self.ctx + nextstart = ctx.match_end + nextstart += (ctx.match_start == nextstart) + self.ctx = ctx.fresh_copy(nextstart) + match = W_SRE_Match(self.srepat, ctx) + return self.space.wrap(match) + else: + self.ctx.match_start += 1 # obscure corner case + return None + +W_SRE_Scanner.typedef = TypeDef( + 'SRE_Scanner', + __iter__ = interp2app(W_SRE_Scanner.iter_w, unwrap_spec=['self']), + next = interp2app(W_SRE_Scanner.next_w, unwrap_spec=['self']), + match = interp2app(W_SRE_Scanner.match_w, unwrap_spec=['self']), + search = interp2app(W_SRE_Scanner.search_w, unwrap_spec=['self']), + pattern = interp_attrproperty('srepat', W_SRE_Scanner), +) Modified: pypy/branch/fast-forward/pypy/module/_sre/test/test_app_sre.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_sre/test/test_app_sre.py (original) +++ pypy/branch/fast-forward/pypy/module/_sre/test/test_app_sre.py Thu Sep 9 01:00:13 2010 @@ -33,8 +33,8 @@ # copy support is disabled by default in _sre.c import re p = re.compile("b") - raises(TypeError, p.__copy__) - raises(TypeError, p.__deepcopy__) + raises(TypeError, p.__copy__) # p.__copy__() should raise + raises(TypeError, p.__deepcopy__) # p.__deepcopy__() should raise def test_creation_attributes(self): import re @@ -85,9 +85,15 @@ assert ['', 'a', None, 'l', 'u', None, 'lla'] == ( re.split("b([ua]|(s))", "balbulla")) + def test_weakref(self): + import re, _weakref + _weakref.ref(re.compile(r"")) -class AppTestSreMatch: +class AppTestSreMatch: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('array', )) + def test_copy(self): import re # copy support is disabled by default in _sre.c @@ -200,6 +206,20 @@ return ret assert ("bbbbb", 3) == re.subn("a", call_me, "ababa") + def test_sub_callable_returns_none(self): + import re + def call_me(match): + return None + assert "acd" == re.sub("b", call_me, "abcd") + + def test_sub_callable_suddenly_unicode(self): + import re + def call_me(match): + if match.group() == 'A': + return unichr(0x3039) + return '' + assert (u"bb\u3039b", 2) == re.subn("[aA]", call_me, "babAb") + def test_match_array(self): import re, array a = array.array('c', 'hello') @@ -266,6 +286,18 @@ p.match().group(0), p.match().group(0)) assert None == p.match() + def test_scanner_match_detail(self): + import re + p = re.compile("a").scanner("aaXaa") + assert "a" == p.match().group(0) + assert "a" == p.match().group(0) + assert None == p.match() + assert "a" == p.match().group(0) + assert "a" == p.match().group(0) + assert None == p.match() + assert None == p.match() + assert None == p.match() + def test_scanner_search(self): import re p = re.compile("\d").scanner("bla23c5a") @@ -651,69 +683,6 @@ s.ATCODES["at_uni_non_boundary"], s.OPCODES["success"]] s.assert_match(opcodes, ["blaha", u"bl%sja" % UPPER_PI]) - def 
test_category_digit(self): - INDIAN_DIGIT = u"\u0966" - opcodes = [s.OPCODES["category"], s.CHCODES["category_digit"]] \ - + s.encode_literal("b") + [s.OPCODES["success"]] - s.assert_match(opcodes, ["1b", "a1b"]) - s.assert_no_match(opcodes, ["bb", "b1", u"%sb" % INDIAN_DIGIT]) - - def test_category_not_digit(self): - INDIAN_DIGIT = u"\u0966" - opcodes = [s.OPCODES["category"], s.CHCODES["category_not_digit"]] \ - + s.encode_literal("b") + [s.OPCODES["success"]] - s.assert_match(opcodes, ["bb", "1ab", u"%sb" % INDIAN_DIGIT]) - s.assert_no_match(opcodes, ["1b", "a1b"]) - - def test_category_space(self): - EM_SPACE = u"\u2001" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_space"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b ", "b\n", "b\t", "b\r", "b\v", "b\f"]) - s.assert_no_match(opcodes, ["bb", "b1", u"b%s" % EM_SPACE]) - - def test_category_not_space(self): - EM_SPACE = u"\u2001" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_not_space"], s.OPCODES["success"]] - s.assert_match(opcodes, ["bb", "b1", u"b%s" % EM_SPACE]) - s.assert_no_match(opcodes, ["b ", "b\n", "b\t", "b\r", "b\v", "b\f"]) - - def test_category_word(self): - LOWER_PI = u"\u03c0" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_word"], s.OPCODES["success"]] - s.assert_match(opcodes, ["bl", "b4", "b_"]) - s.assert_no_match(opcodes, ["b ", "b\n", u"b%s" % LOWER_PI]) - - def test_category_not_word(self): - LOWER_PI = u"\u03c0" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_not_word"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b ", "b\n", u"b%s" % LOWER_PI]) - s.assert_no_match(opcodes, ["bl", "b4", "b_"]) - - def test_category_linebreak(self): - LINE_SEP = u"\u2028" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_linebreak"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b\n"]) - s.assert_no_match(opcodes, ["b ", "bs", "b\r", u"b%s" % LINE_SEP]) - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_uni_linebreak"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b\n", u"b%s" % LINE_SEP]) - - def test_category_not_linebreak(self): - LINE_SEP = u"\u2028" - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_not_linebreak"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b ", "bs", u"b%s" % LINE_SEP]) - s.assert_no_match(opcodes, ["b\n"]) - opcodes = s.encode_literal("b") \ - + [s.OPCODES["category"], s.CHCODES["category_uni_not_linebreak"], s.OPCODES["success"]] - s.assert_match(opcodes, ["b ", "bs"]) - s.assert_no_match(opcodes, ["b\n", u"b%s" % LINE_SEP, "b\r"]) - def test_category_loc_word(self): import locale try: @@ -871,10 +840,6 @@ s.assert_match(opcodes, ["ab", "aaaab", "baabb"]) s.assert_no_match(opcodes, ["aaa", "", "ac"]) - def test_max_until_error(self): - opcodes = [s.OPCODES["max_until"], s.OPCODES["success"]] - raises(RuntimeError, s.search, opcodes, "a") - def test_max_until_zero_width_match(self): # re.compile won't compile prospective zero-with matches (all of them?), # so we can only produce an example by directly constructing bytecodes. 
@@ -894,10 +859,6 @@ s.assert_no_match(opcodes, ["b"]) assert "aab" == s.search(opcodes, "aabb").group(0) - def test_min_until_error(self): - opcodes = [s.OPCODES["min_until"], s.OPCODES["success"]] - raises(RuntimeError, s.search, opcodes, "a") - def test_groupref(self): opcodes = [s.OPCODES["mark"], 0, s.OPCODES["any"], s.OPCODES["mark"], 1] \ + s.encode_literal("a") + [s.OPCODES["groupref"], 0, s.OPCODES["success"]] Modified: pypy/branch/fast-forward/pypy/module/_stackless/interp_coroutine.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_stackless/interp_coroutine.py (original) +++ pypy/branch/fast-forward/pypy/module/_stackless/interp_coroutine.py Thu Sep 9 01:00:13 2010 @@ -265,10 +265,14 @@ instr += 1 oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 nargs = oparg & 0xff + nkwds = (oparg >> 8) & 0xff if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']: - chain = resume_state_create(chain, 'CALL_METHOD', frame, - nargs) - elif opcode == map['CALL_FUNCTION'] and (oparg >> 8) & 0xff == 0: + if nkwds == 0: # only positional arguments + chain = resume_state_create(chain, 'CALL_METHOD', frame, + nargs) + else: # includes keyword arguments + chain = resume_state_create(chain, 'CALL_METHOD_KW', frame) + elif opcode == map['CALL_FUNCTION'] and nkwds == 0: # Only positional arguments # case1: ("CALL_FUNCTION", f, nargs, returns=w_result) chain = resume_state_create(chain, 'CALL_FUNCTION', frame, Modified: pypy/branch/fast-forward/pypy/module/cpyext/api.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/cpyext/api.py (original) +++ pypy/branch/fast-forward/pypy/module/cpyext/api.py Thu Sep 9 01:00:13 2010 @@ -1,5 +1,5 @@ import ctypes -import sys +import sys, os import atexit import py @@ -62,11 +62,15 @@ VA_LIST_P = rffi.VOIDP # rffi.COpaquePtr('va_list') CONST_STRING = lltype.Ptr(lltype.Array(lltype.Char, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) CONST_WSTRING = lltype.Ptr(lltype.Array(lltype.UniChar, - hints={'nolength': True})) + hints={'nolength': True}), + use_cache=False) assert CONST_STRING is not rffi.CCHARP +assert CONST_STRING == rffi.CCHARP assert CONST_WSTRING is not rffi.CWCHARP +assert CONST_WSTRING == rffi.CWCHARP # FILE* interface FILEP = rffi.COpaquePtr('FILE') @@ -896,6 +900,8 @@ initfunctype = lltype.Ptr(lltype.FuncType([], lltype.Void)) @unwrap_spec(ObjSpace, str, str) def load_extension_module(space, path, name): + if os.sep not in path: + path = os.curdir + os.sep + path # force a '/' in the path state = space.fromcache(State) state.package_context = name try: Modified: pypy/branch/fast-forward/pypy/module/cpyext/methodobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/cpyext/methodobject.py (original) +++ pypy/branch/fast-forward/pypy/module/cpyext/methodobject.py Thu Sep 9 01:00:13 2010 @@ -100,7 +100,11 @@ return generic_cpy_call(space, self.ml.c_ml_meth, w_self, w_arg) def get_doc(space, self): - return space.wrap(rffi.charp2str(self.ml.c_ml_doc)) + doc = self.ml.c_ml_doc + if doc: + return space.wrap(rffi.charp2str(doc)) + else: + return space.w_None class W_PyCMethodObject(W_PyCFunctionObject): Modified: pypy/branch/fast-forward/pypy/module/cpyext/stubs.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/module/cpyext/stubs.py (original) +++ pypy/branch/fast-forward/pypy/module/cpyext/stubs.py Thu Sep 9 01:00:13 2010 @@ -2874,36 +2874,6 @@ """ raise NotImplementedError - at cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) -def PyUnicode_DecodeUTF16(space, s, size, errors, byteorder): - """Decode length bytes from a UTF-16 encoded buffer string and return the - corresponding Unicode object. errors (if non-NULL) defines the error - handling. It defaults to "strict". - - If byteorder is non-NULL, the decoder starts decoding using the given byte - order: - - *byteorder == -1: little endian - *byteorder == 0: native order - *byteorder == 1: big endian - - If *byteorder is zero, and the first two bytes of the input data are a - byte order mark (BOM), the decoder switches to this byte order and the BOM is - not copied into the resulting Unicode string. If *byteorder is -1 or - 1, any byte order mark is copied to the output (where it will result in - either a \ufeff or a \ufffe character). - - After completion, *byteorder is set to the current byte order at the end - of input data. - - If byteorder is NULL, the codec starts in native order mode. - - Return NULL if an exception was raised by the codec. - - This function used an int type for size. This might require - changes in your code for properly supporting 64-bit systems.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP, Py_ssize_t], PyObject) def PyUnicode_DecodeUTF16Stateful(space, s, size, errors, byteorder, consumed): """If consumed is NULL, behave like PyUnicode_DecodeUTF16(). If Modified: pypy/branch/fast-forward/pypy/module/cpyext/test/test_unicodeobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/cpyext/test/test_unicodeobject.py (original) +++ pypy/branch/fast-forward/pypy/module/cpyext/test/test_unicodeobject.py Thu Sep 9 01:00:13 2010 @@ -172,4 +172,37 @@ result = api.PyUnicode_AsASCIIString(w_ustr) assert result is None + def test_decode_utf16(self, space, api): + def test(encoded, endian, realendian=None): + encoded_charp = rffi.str2charp(encoded) + strict_charp = rffi.str2charp("strict") + if endian is not None: + pendian = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + if endian < 0: + pendian[0] = -1 + elif endian > 0: + pendian[0] = 1 + else: + pendian[0] = 0 + else: + pendian = None + w_ustr = api.PyUnicode_DecodeUTF16(encoded_charp, len(encoded), strict_charp, pendian) + assert space.eq_w(space.call_method(w_ustr, 'encode', space.wrap('ascii')), + space.wrap("abcd")) + + rffi.free_charp(encoded_charp) + rffi.free_charp(strict_charp) + if pendian: + if realendian is not None: + assert rffi.cast(rffi.INT, realendian) == pendian[0] + lltype.free(pendian, flavor='raw') + + test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) + + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) + + test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) + + test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) + test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) Modified: pypy/branch/fast-forward/pypy/module/cpyext/unicodeobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/cpyext/unicodeobject.py (original) +++ pypy/branch/fast-forward/pypy/module/cpyext/unicodeobject.py Thu Sep 9 01:00:13 2010 @@ -9,6 +9,7 @@ from pypy.module.cpyext.pyobject import PyObject, from_ref, make_typedescr from 
pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype +from pypy.rlib import runicode import sys ## See comment in stringobject.py. PyUnicode_FromUnicode(NULL, size) is not @@ -307,6 +308,64 @@ w_errors = space.w_None return space.call_method(w_str, 'decode', space.wrap("utf-8"), w_errors) + at cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) +def PyUnicode_DecodeUTF16(space, s, size, llerrors, pbyteorder): + """Decode length bytes from a UTF-16 encoded buffer string and return the + corresponding Unicode object. errors (if non-NULL) defines the error + handling. It defaults to "strict". + + If byteorder is non-NULL, the decoder starts decoding using the given byte + order: + + *byteorder == -1: little endian + *byteorder == 0: native order + *byteorder == 1: big endian + + If *byteorder is zero, and the first two bytes of the input data are a + byte order mark (BOM), the decoder switches to this byte order and the BOM is + not copied into the resulting Unicode string. If *byteorder is -1 or + 1, any byte order mark is copied to the output (where it will result in + either a \ufeff or a \ufffe character). + + After completion, *byteorder is set to the current byte order at the end + of input data. + + If byteorder is NULL, the codec starts in native order mode. + + Return NULL if an exception was raised by the codec. + + This function used an int type for size. This might require + changes in your code for properly supporting 64-bit systems.""" + + string = rffi.charpsize2str(s, size) + + #FIXME: I don't like these prefixes + if pbyteorder is not None: # correct NULL check? + llbyteorder = rffi.cast(lltype.Signed, pbyteorder[0]) # compatible with int? + if llbyteorder < 0: + byteorder = "little" + elif llbyteorder > 0: + byteorder = "big" + else: + byteorder = "native" + else: + byteorder = "native" + + if llerrors: + errors = rffi.charp2str(llerrors) + else: + errors = None + + result, length, byteorder = runicode.str_decode_utf_16_helper(string, size, + errors, + True, # final ? false for multiple passes? 
+ None, # errorhandler + byteorder) + if pbyteorder is not None: + pbyteorder[0] = rffi.cast(rffi.INT, byteorder) + + return space.wrap(result) + @cpython_api([PyObject], PyObject) def PyUnicode_AsASCIIString(space, w_unicode): """Encode a Unicode object using ASCII and return the result as Python string Modified: pypy/branch/fast-forward/pypy/module/fcntl/test/test_fcntl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/fcntl/test/test_fcntl.py (original) +++ pypy/branch/fast-forward/pypy/module/fcntl/test/test_fcntl.py Thu Sep 9 01:00:13 2010 @@ -13,7 +13,7 @@ class AppTestFcntl: def setup_class(cls): - space = gettestobjspace(usemodules=('fcntl',)) + space = gettestobjspace(usemodules=('fcntl', 'array')) cls.space = space tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = space.wrap(tmpprefix) Modified: pypy/branch/fast-forward/pypy/module/marshal/test/test_marshalimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/marshal/test/test_marshalimpl.py (original) +++ pypy/branch/fast-forward/pypy/module/marshal/test/test_marshalimpl.py Thu Sep 9 01:00:13 2010 @@ -1,9 +1,13 @@ from pypy.module.marshal import interp_marshal from pypy.interpreter.error import OperationError +from pypy.conftest import gettestobjspace import sys class AppTestMarshalMore: + def setup_class(cls): + space = gettestobjspace(usemodules=('array',)) + cls.space = space def test_long_0(self): import marshal Modified: pypy/branch/fast-forward/pypy/module/posix/interp_posix.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/posix/interp_posix.py (original) +++ pypy/branch/fast-forward/pypy/module/posix/interp_posix.py Thu Sep 9 01:00:13 2010 @@ -1,8 +1,9 @@ from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped from pypy.rlib import rposix, objectmodel +from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import r_longlong from pypy.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 from pypy.rpython.module.ll_os import RegisterOs from pypy.rpython.module import ll_os_stat from pypy.rpython.lltypesystem import rffi, lltype @@ -12,15 +13,78 @@ import os, sys _WIN = sys.platform == 'win32' -def open(space, fname, flag, mode=0777): +class FileEncoder: + def __init__(self, space, w_obj): + self.space = space + self.w_obj = w_obj + + def as_bytes(self): + from pypy.module.sys.interp_encoding import getfilesystemencoding + space = self.space + w_bytes = space.call_method(self.w_obj, 'encode', + getfilesystemencoding(space)) + return space.str_w(w_bytes) + + def as_unicode(self): + return self.space.unicode_w(self.w_obj) + +class FileDecoder: + def __init__(self, space, w_obj): + self.space = space + self.w_obj = w_obj + + def as_bytes(self): + return self.space.str_w(self.w_obj) + + def as_unicode(self): + from pypy.module.sys.interp_encoding import getfilesystemencoding + space = self.space + w_unicode = space.call_method(self.w_obj, 'decode', + getfilesystemencoding(space)) + return space.unicode_w(w_unicode) + + at specialize.memo() +def dispatch_filename(func, tag=0): + def dispatch(space, w_fname, *args): + if space.isinstance_w(w_fname, space.w_unicode): + fname = FileEncoder(space, w_fname) + return func(fname, *args) 
+ else: + fname = space.str_w(w_fname) + return func(fname, *args) + return dispatch + + at specialize.memo() +def dispatch_filename_2(func): + def dispatch(space, w_fname1, w_fname2, *args): + if space.isinstance_w(w_fname1, space.w_unicode): + fname1 = FileEncoder(space, w_fname1) + if space.isinstance_w(w_fname2, space.w_unicode): + fname2 = FileEncoder(space, w_fname2) + return func(fname1, fname2, *args) + else: + fname2 = FileDecoder(space, w_fname2) + return func(fname1, fname2, *args) + else: + fname1 = FileDecoder(space, w_fname1) + if space.isinstance_w(w_fname2, space.w_unicode): + fname2 = FileEncoder(space, w_fname2) + return func(fname1, fname2, *args) + else: + fname2 = FileDecoder(space, w_fname2) + return func(fname1, fname2, *args) + return dispatch + +def open(space, w_fname, flag, mode=0777): """Open a file (for low level IO). Return a file descriptor (a small integer).""" - try: - fd = os.open(fname, flag, mode) + try: + fd = dispatch_filename(rposix.open)( + space, w_fname, flag, mode) except OSError, e: - raise wrap_oserror(space, e, fname) + raise wrap_oserror2(space, e, w_fname) return space.wrap(fd) -open.unwrap_spec = [ObjSpace, 'path', "c_int", "c_int"] +open.unwrap_spec = [ObjSpace, W_Root, "c_int", "c_int"] def lseek(space, fd, pos, how): """Set the current position of a file descriptor. Return the new position. @@ -167,7 +231,7 @@ return build_stat_result(space, st) fstat.unwrap_spec = [ObjSpace, "c_int"] -def stat(space, path): +def stat(space, w_path): """Perform a stat system call on the given path. Return an object with (at least) the following attributes: st_mode @@ -183,22 +247,22 @@ """ try: - st = os.stat(path) + st = dispatch_filename(rposix.stat)(space, w_path) except OSError, e: - raise wrap_oserror(space, e, path) + raise wrap_oserror2(space, e, w_path) else: return build_stat_result(space, st) -stat.unwrap_spec = [ObjSpace, 'path'] +stat.unwrap_spec = [ObjSpace, W_Root] -def lstat(space, path): +def lstat(space, w_path): "Like stat(path), but do no follow symbolic links." try: - st = os.lstat(path) + st = dispatch_filename(rposix.lstat)(space, w_path) except OSError, e: - raise wrap_oserror(space, e, path) + raise wrap_oserror2(space, e, w_path) else: return build_stat_result(space, st) -lstat.unwrap_spec = [ObjSpace, 'path'] +lstat.unwrap_spec = [ObjSpace, W_Root] class StatState(object): def __init__(self, space): @@ -239,7 +303,7 @@ raise wrap_oserror(space, e) dup2.unwrap_spec = [ObjSpace, "c_int", "c_int"] -def access(space, path, mode): +def access(space, w_path, mode): """ access(path, mode) -> 1 if granted, 0 otherwise @@ -250,12 +314,12 @@ existence, or the inclusive-OR of R_OK, W_OK, and X_OK. 
""" try: - ok = os.access(path, mode) - except OSError, e: - raise wrap_oserror(space, e, path) + ok = dispatch_filename(rposix.access)(space, w_path, mode) + except OSError, e: + raise wrap_oserror2(space, e, w_path) else: return space.wrap(ok) -access.unwrap_spec = [ObjSpace, str, "c_int"] +access.unwrap_spec = [ObjSpace, W_Root, "c_int"] def times(space): @@ -286,32 +350,38 @@ return space.wrap(rc) system.unwrap_spec = [ObjSpace, str] -def unlink(space, path): +def unlink(space, w_path): """Remove a file (same as remove(path)).""" try: - os.unlink(path) - except OSError, e: - raise wrap_oserror(space, e, path) -unlink.unwrap_spec = [ObjSpace, 'path'] + dispatch_filename(rposix.unlink)(space, w_path) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +unlink.unwrap_spec = [ObjSpace, W_Root] -def remove(space, path): +def remove(space, w_path): """Remove a file (same as unlink(path)).""" try: - os.unlink(path) - except OSError, e: - raise wrap_oserror(space, e, path) -remove.unwrap_spec = [ObjSpace, 'path'] + dispatch_filename(rposix.unlink)(space, w_path) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +remove.unwrap_spec = [ObjSpace, W_Root] -def _getfullpathname(space, path): +def _getfullpathname(space, w_path): """helper for ntpath.abspath """ - posix = __import__(os.name) # nt specific try: - fullpath = posix._getfullpathname(path) + if space.isinstance_w(w_path, space.w_unicode): + path = FileEncoder(space, w_path) + fullpath = rposix._getfullpathname(path) + w_fullpath = space.wrap(fullpath) + else: + path = space.str_w(w_path) + fullpath = rposix._getfullpathname(path) + w_fullpath = space.wrap(fullpath) except OSError, e: - raise wrap_oserror(space, e, path) - else: - return space.wrap(fullpath) -_getfullpathname.unwrap_spec = [ObjSpace, str] + raise wrap_oserror2(space, e, w_path) + else: + return w_fullpath +_getfullpathname.unwrap_spec = [ObjSpace, W_Root] def getcwd(space): """Return the current working directory.""" @@ -323,35 +393,46 @@ return space.wrap(cur) getcwd.unwrap_spec = [ObjSpace] -def getcwdu(space): - """Return the current working directory as a unicode string.""" - # XXX ascii encoding for now - return space.call_method(getcwd(space), 'decode') +if sys.platform == 'win32': + def getcwdu(space): + """Return the current working directory as a unicode string.""" + try: + cur = os.getcwdu() + except OSError, e: + raise wrap_oserror(space, e) + else: + return space.wrap(cur) +else: + def getcwdu(space): + """Return the current working directory as a unicode string.""" + filesystemencoding = space.sys.filesystemencoding + return space.call_method(getcwd(space), 'decode', + space.wrap(filesystemencoding)) getcwdu.unwrap_spec = [ObjSpace] -def chdir(space, path): +def chdir(space, w_path): """Change the current working directory to the specified path.""" try: - os.chdir(path) - except OSError, e: - raise wrap_oserror(space, e, path) -chdir.unwrap_spec = [ObjSpace, str] + dispatch_filename(rposix.chdir)(space, w_path) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +chdir.unwrap_spec = [ObjSpace, W_Root] -def mkdir(space, path, mode=0777): +def mkdir(space, w_path, mode=0777): """Create a directory.""" try: - os.mkdir(path, mode) - except OSError, e: - raise wrap_oserror(space, e, path) -mkdir.unwrap_spec = [ObjSpace, str, "c_int"] + dispatch_filename(rposix.mkdir)(space, w_path, mode) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +mkdir.unwrap_spec = [ObjSpace, W_Root, "c_int"] -def rmdir(space, path): +def 
rmdir(space, w_path): """Remove a directory.""" try: - os.rmdir(path) - except OSError, e: - raise wrap_oserror(space, e, path) -rmdir.unwrap_spec = [ObjSpace, str] + dispatch_filename(rposix.rmdir)(space, w_path) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +rmdir.unwrap_spec = [ObjSpace, W_Root] def strerror(space, errno): """Translate an error code to a message string.""" @@ -418,7 +499,7 @@ unsetenv.unwrap_spec = [ObjSpace, str] -def listdir(space, dirname): +def listdir(space, w_dirname): """Return a list containing the names of the entries in the directory. \tpath: path of directory to list @@ -426,12 +507,18 @@ The list is in arbitrary order. It does not include the special entries '.' and '..' even if they are present in the directory.""" try: - result = os.listdir(dirname) + if space.isinstance_w(w_dirname, space.w_unicode): + dirname = FileEncoder(space, w_dirname) + result = rposix.listdir(dirname) + result_w = [space.wrap(s) for s in result] + else: + dirname = space.str_w(w_dirname) + result = rposix.listdir(dirname) + result_w = [space.wrap(s) for s in result] except OSError, e: - raise wrap_oserror(space, e, dirname) - result_w = [space.wrap(s) for s in result] + raise wrap_oserror2(space, e, w_dirname) return space.newlist(result_w) -listdir.unwrap_spec = [ObjSpace, str] +listdir.unwrap_spec = [ObjSpace, W_Root] def pipe(space): "Create a pipe. Returns (read_end, write_end)." @@ -442,21 +529,21 @@ return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) pipe.unwrap_spec = [ObjSpace] -def chmod(space, path, mode): +def chmod(space, w_path, mode): "Change the access permissions of a file." - try: - os.chmod(path, mode) - except OSError, e: - raise wrap_oserror(space, e, path) -chmod.unwrap_spec = [ObjSpace, str, "c_int"] + try: + dispatch_filename(rposix.chmod)(space, w_path, mode) + except OSError, e: + raise wrap_oserror2(space, e, w_path) +chmod.unwrap_spec = [ObjSpace, W_Root, "c_int"] -def rename(space, old, new): +def rename(space, w_old, w_new): "Rename a file or directory." - try: - os.rename(old, new) - except OSError, e: + try: + dispatch_filename_2(rposix.rename)(space, w_old, w_new) + except OSError, e: raise wrap_oserror(space, e) -rename.unwrap_spec = [ObjSpace, str, str] +rename.unwrap_spec = [ObjSpace, W_Root, W_Root] def umask(space, mask): "Set the current numeric umask and return the previous umask." 
@@ -590,7 +677,7 @@ raise wrap_oserror(space, e) execve.unwrap_spec = [ObjSpace, str, W_Root, W_Root] -def utime(space, path, w_tuple): +def utime(space, w_path, w_tuple): """ utime(path, (atime, mtime)) utime(path, None) @@ -599,10 +686,10 @@ """ if space.is_w(w_tuple, space.w_None): try: - os.utime(path, None) + dispatch_filename(rposix.utime, 1)(space, w_path, None) return except OSError, e: - raise wrap_oserror(space, e, path) + raise wrap_oserror2(space, e, w_path) try: msg = "utime() arg 2 must be a tuple (atime, mtime) or None" args_w = space.fixedview(w_tuple) @@ -610,14 +697,14 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) actime = space.float_w(args_w[0]) modtime = space.float_w(args_w[1]) - os.utime(path, (actime, modtime)) + dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) except OSError, e: - raise wrap_oserror(space, e, path) + raise wrap_oserror2(space, e, w_path) except OperationError, e: if not e.match(space, space.w_TypeError): raise raise OperationError(space.w_TypeError, space.wrap(msg)) -utime.unwrap_spec = [ObjSpace, str, W_Root] +utime.unwrap_spec = [ObjSpace, W_Root, W_Root] def setsid(space): """setsid() -> pid Modified: pypy/branch/fast-forward/pypy/module/posix/test/test_posix2.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/posix/test/test_posix2.py (original) +++ pypy/branch/fast-forward/pypy/module/posix/test/test_posix2.py Thu Sep 9 01:00:13 2010 @@ -32,6 +32,9 @@ # even when running on top of CPython 2.4. os.stat_float_times(True) + # Initialize sys.filesystemencoding + space.call_method(space.getbuiltinmodule('sys'), 'getfilesystemencoding') + def need_sparse_files(): if sys.platform == 'darwin': py.test.skip("no sparse files on default Mac OS X file system") @@ -322,15 +325,12 @@ if hasattr(__import__(os.name), "openpty"): def test_openpty(self): os = self.posix - master_fd, slave_fd = self.posix.openpty() - try: - assert isinstance(master_fd, int) - assert isinstance(slave_fd, int) - os.write(slave_fd, 'x') - assert os.read(master_fd, 1) == 'x' - finally: - os.close(master_fd) - os.close(slave_fd) + master_fd, slave_fd = os.openpty() + assert isinstance(master_fd, int) + assert isinstance(slave_fd, int) + os.write(slave_fd, 'x\n') + data = os.read(master_fd, 100) + assert data.startswith('x') if hasattr(__import__(os.name), "execv"): @@ -709,6 +709,28 @@ except OSError: pass +class AppTestUnicodeFilename: + def setup_class(cls): + ufilename = (unicode(udir.join('test_unicode_filename_')) + + u'\u65e5\u672c.txt') # "Japan" + try: + f = file(ufilename, 'w') + except UnicodeEncodeError: + py.test.skip("encoding not good enough") + f.write("test") + f.close() + cls.space = space + cls.w_filename = space.wrap(ufilename) + cls.w_posix = space.appexec([], GET_POSIX) + + def test_open(self): + fd = self.posix.open(self.filename, self.posix.O_RDONLY) + try: + content = self.posix.read(fd, 50) + finally: + self.posix.close(fd) + assert content == "test" + class TestPexpect(object): # XXX replace with AppExpectTest class as soon as possible Modified: pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py Thu Sep 9 01:00:13 2010 @@ -9,7 +9,7 @@ import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import 
OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, Arguments -from pypy.interpreter.pycode import PyCode, CO_CONTAINSLOOP +from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame from opcode import opmap @@ -24,9 +24,6 @@ JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] -def can_inline(next_instr, bytecode): - return not bool(bytecode.co_flags & CO_CONTAINSLOOP) - def get_printable_location(next_instr, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names name = opcode_method_names[ord(bytecode.co_code[next_instr])] @@ -39,7 +36,8 @@ bytecode.jit_cells[next_instr] = newcell def confirm_enter_jit(next_instr, bytecode, frame, ec): - return (frame.w_f_trace is None and + return (not (bytecode.co_flags & CO_GENERATOR) and + frame.w_f_trace is None and ec.profilefunc is None and ec.w_tracefunc is None) @@ -57,8 +55,7 @@ ## blockstack = frame.blockstack ## return (valuestackdepth, blockstack) -pypyjitdriver = PyPyJitDriver(can_inline = can_inline, - get_printable_location = get_printable_location, +pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, confirm_enter_jit = confirm_enter_jit) Modified: pypy/branch/fast-forward/pypy/module/pypyjit/policy.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/policy.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/policy.py Thu Sep 9 01:00:13 2010 @@ -11,7 +11,7 @@ if '.' in modname: modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', - 'imp', 'sys']: + 'imp', 'sys', 'array']: return True return False @@ -32,6 +32,8 @@ return False if mod.startswith('pypy.interpreter.pyparser.'): return False + if mod == 'pypy.interpreter.generator': + return False if mod.startswith('pypy.module.'): modname = mod[len('pypy.module.'):] if not self.look_inside_pypy_module(modname): Modified: pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 9 01:00:13 2010 @@ -189,7 +189,7 @@ return r ''', 28, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_factorialrec(self): self.run_source(''' @@ -200,7 +200,7 @@ return 1 ''', 0, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -615,6 +615,237 @@ return total ''', 170, ([], 4999450000L)) + def test_boolrewrite_invers(self): + for a, b, res, ops in (('2000', '2000', 20001000, 51), + ( '500', '500', 15001500, 81), + ( '300', '600', 16001700, 83), + ( 'a', 'b', 16001700, 89), + ( 'a', 'a', 13001700, 85)): + + self.run_source(''' + def main(): + sa = 0 + a = 300 + b = 600 + for i in range(1000): + if i < %s: sa += 1 + else: sa += 2 + if i >= %s: sa += 10000 + else: sa += 20000 + return sa + '''%(a, b), ops, ([], res)) + + def test_boolrewrite_reflex(self): + for a, b, res, ops in (('2000', '2000', 10001000, 51), + ( '500', '500', 15001500, 81), + ( '300', '600', 14001700, 83), + ( 'a', 'b', 14001700, 89), + ( 'a', 'a', 17001700, 85)): + + self.run_source(''' + def main(): + sa = 0 + a = 300 + b = 600 + for i in 
range(1000): + if i < %s: sa += 1 + else: sa += 2 + if %s > i: sa += 10000 + else: sa += 20000 + return sa + '''%(a, b), ops, ([], res)) + + + def test_boolrewrite_correct_invers(self): + def opval(i, op, a): + if eval('%d %s %d' % (i, op, a)): return 1 + return 2 + + ops = ('<', '>', '<=', '>=', '==', '!=') + for op1 in ops: + for op2 in ops: + for a,b in ((500, 500), (300, 600)): + res = 0 + res += opval(a-1, op1, a) * (a) + res += opval( a, op1, a) + res += opval(a+1, op1, a) * (1000 - a - 1) + res += opval(b-1, op2, b) * 10000 * (b) + res += opval( b, op2, b) * 10000 + res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) + + self.run_source(''' + def main(): + sa = 0 + for i in range(1000): + if i %s %d: sa += 1 + else: sa += 2 + if i %s %d: sa += 10000 + else: sa += 20000 + return sa + '''%(op1, a, op2, b), 83, ([], res)) + + self.run_source(''' + def main(): + sa = 0 + i = 0.0 + while i < 250.0: + if i %s %f: sa += 1 + else: sa += 2 + if i %s %f: sa += 10000 + else: sa += 20000 + i += 0.25 + return sa + '''%(op1, float(a)/4.0, op2, float(b)/4.0), 109, ([], res)) + + + def test_boolrewrite_correct_reflex(self): + def opval(i, op, a): + if eval('%d %s %d' % (i, op, a)): return 1 + return 2 + + ops = ('<', '>', '<=', '>=', '==', '!=') + for op1 in ops: + for op2 in ops: + for a,b in ((500, 500), (300, 600)): + res = 0 + res += opval(a-1, op1, a) * (a) + res += opval( a, op1, a) + res += opval(a+1, op1, a) * (1000 - a - 1) + res += opval(b, op2, b-1) * 10000 * (b) + res += opval(b, op2, b) * 10000 + res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) + + self.run_source(''' + def main(): + sa = 0 + for i in range(1000): + if i %s %d: sa += 1 + else: sa += 2 + if %d %s i: sa += 10000 + else: sa += 20000 + return sa + '''%(op1, a, b, op2), 83, ([], res)) + + self.run_source(''' + def main(): + sa = 0 + i = 0.0 + while i < 250.0: + if i %s %f: sa += 1 + else: sa += 2 + if %f %s i: sa += 10000 + else: sa += 20000 + i += 0.25 + return sa + '''%(op1, float(a)/4.0, float(b)/4.0, op2), 109, ([], res)) + + def test_boolrewrite_ptr(self): + compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') + for e1 in compares: + for e2 in compares: + a, b, c = 1, 2, 3 + if eval(e1): res = 752 * 1 + else: res = 752 * 2 + if eval(e2): res += 752 * 10000 + else: res += 752 * 20000 + a = b + if eval(e1): res += 248 * 1 + else: res += 248 * 2 + if eval(e2): res += 248 * 10000 + else: res += 248 * 20000 + + + if 'c' in e1 or 'c' in e2: + n = 337 + else: + n = 215 + + self.run_source(''' + class tst: + pass + def main(): + a = tst() + b = tst() + c = tst() + sa = 0 + for i in range(1000): + if %s: sa += 1 + else: sa += 2 + if %s: sa += 10000 + else: sa += 20000 + if i > 750: a = b + return sa + '''%(e1, e2), n, ([], res)) + + def test_array_sum(self): + for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): + res = 19352859 + if tc in 'IL': + res = long(res) + elif tc in 'fd': + res = float(res) + + self.run_source(''' + from array import array + + def main(): + img = array("%s", range(127) * 5) * 484 + l, i = 0, 0 + while i < 640 * 480: + l += img[i] + i += 1 + return l + ''' % tc, maxops, ([], res)) + + def test_array_sum_char(self): + self.run_source(''' + from array import array + + def main(): + img = array("c", "Hello") * 130 * 480 + l, i = 0, 0 + while i < 640 * 480: + l += ord(img[i]) + i += 1 + return l + ''', 60, ([], 30720000)) + + def test_array_sum_unicode(self): + self.run_source(''' + from array import array + + def main(): + img = array("u", u"Hello") * 130 * 480 + l, i = 
0, 0 + while i < 640 * 480: + if img[i] == u"l": + l += 1 + i += 1 + return l + ''', 65, ([], 122880)) + + def test_array_intimg(self): + for tc, maxops in zip('ilILd', (67, 67, 69, 69, 61)): + res = 73574560 + if tc in 'IL': + res = long(res) + elif tc in 'fd': + res = float(res) + + self.run_source(''' + from array import array + + def main(tc): + img = array(tc, range(3)) * (350 * 480) + intimg = array(tc, (0,)) * (640 * 480) + l, i = 0, 640 + while i < 640 * 480: + l = l + img[i] + intimg[i] = (intimg[i-640] + l) + i += 1 + return intimg[i - 1] + ''', maxops, ([tc], res)) + class AppTestJIT(PyPyCJITTests): def setup_class(cls): if not option.runappdirect: @@ -637,6 +868,7 @@ cls.counter = 0 cls.pypy_c = option.pypy_c + def has_info(pypy_c, option): g = os.popen('"%s" --info' % pypy_c, 'r') lines = g.readlines() Modified: pypy/branch/fast-forward/pypy/module/signal/interp_signal.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/signal/interp_signal.py (original) +++ pypy/branch/fast-forward/pypy/module/signal/interp_signal.py Thu Sep 9 01:00:13 2010 @@ -7,6 +7,7 @@ from pypy.translator.tool.cbuild import ExternalCompilationInfo import py from pypy.tool import autopath +from pypy.rlib import jit def setup(): for key, value in cpy_signal.__dict__.items(): @@ -159,10 +160,12 @@ return space.wrap(SIG_DFL) getsignal.unwrap_spec = [ObjSpace, int] + at jit.dont_look_inside def alarm(space, timeout): return space.wrap(c_alarm(timeout)) alarm.unwrap_spec = [ObjSpace, int] + at jit.dont_look_inside def pause(space): c_pause() return space.w_None @@ -173,6 +176,7 @@ raise OperationError(space.w_ValueError, space.wrap("signal number out of range")) + at jit.dont_look_inside def signal(space, signum, w_handler): """ signal(sig, action) -> action Modified: pypy/branch/fast-forward/pypy/module/thread/test/test_gil.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/thread/test/test_gil.py (original) +++ pypy/branch/fast-forward/pypy/module/thread/test/test_gil.py Thu Sep 9 01:00:13 2010 @@ -1,7 +1,6 @@ import time from pypy.module.thread import gil from pypy.module.thread.test import test_ll_thread -from pypy.rpython.lltypesystem import rffi from pypy.module.thread import ll_thread as thread from pypy.rlib.objectmodel import we_are_translated Modified: pypy/branch/fast-forward/pypy/objspace/flow/specialcase.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/flow/specialcase.py (original) +++ pypy/branch/fast-forward/pypy/objspace/flow/specialcase.py Thu Sep 9 01:00:13 2010 @@ -3,6 +3,7 @@ from pypy.interpreter.gateway import ApplevelClass from pypy.interpreter.error import OperationError from pypy.tool.cache import Cache +from pypy.rlib.rarithmetic import r_uint import py def sc_import(space, fn, args): @@ -120,6 +121,14 @@ pass return space.do_operation('simple_call', Constant(func), *args_w) +def sc_r_uint(space, r_uint, args): + args_w, kwds_w = args.unpack() + assert not kwds_w + [w_value] = args_w + if isinstance(w_value, Constant): + return Constant(r_uint(w_value.value)) + return space.do_operation('simple_call', space.wrap(r_uint), w_value) + def setup(space): # fn = pyframe.normalize_exception.get_function(space) # this is now routed through the objspace, directly. 
@@ -131,3 +140,7 @@ # if possible for fn in OperationName: space.specialcases[fn] = sc_operator + # special case to constant-fold r_uint(32-bit-constant) + # (normally, the 32-bit constant is a long, and is not allowed to + # show up in the flow graphs at all) + space.specialcases[r_uint] = sc_r_uint Modified: pypy/branch/fast-forward/pypy/objspace/std/callmethod.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/callmethod.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/callmethod.py Thu Sep 9 01:00:13 2010 @@ -12,7 +12,7 @@ from pypy.interpreter import function from pypy.objspace.descroperation import object_getattribute -from pypy.rlib import rstack # for resume points +from pypy.rlib import jit, rstack # for resume points # This module exports two extra methods for StdObjSpaceFrame implementing # the LOOKUP_METHOD and CALL_METHOD opcodes in an efficient way, as well @@ -56,16 +56,41 @@ f.pushvalue(w_value) f.pushvalue(None) -def CALL_METHOD(f, nargs, *ignored): - # 'nargs' is the argument count excluding the implicit 'self' - w_self = f.peekvalue(nargs) - w_callable = f.peekvalue(nargs + 1) - n = nargs + (w_self is not None) - try: - w_result = f.space.call_valuestack(w_callable, n, f) - rstack.resume_point("CALL_METHOD", f, nargs, returns=w_result) - finally: - f.dropvalues(nargs + 2) + at jit.unroll_safe +def CALL_METHOD(f, oparg, *ignored): + # opargs contains the arg, and kwarg count, excluding the implicit 'self' + n_args = oparg & 0xff + n_kwargs = (oparg >> 8) & 0xff + w_self = f.peekvalue(n_args + (2 * n_kwargs)) + n = n_args + (w_self is not None) + + if not n_kwargs: + w_callable = f.peekvalue(n_args + (2 * n_kwargs) + 1) + try: + w_result = f.space.call_valuestack(w_callable, n, f) + rstack.resume_point("CALL_METHOD", f, n_args, returns=w_result) + finally: + f.dropvalues(n_args + 2) + else: + keywords = [None] * n_kwargs + keywords_w = [None] * n_kwargs + while True: + n_kwargs -= 1 + if n_kwargs < 0: + break + w_value = f.popvalue() + w_key = f.popvalue() + key = f.space.str_w(w_key) + keywords[n_kwargs] = key + keywords_w[n_kwargs] = w_value + + arguments = f.popvalues(n) # includes w_self if it is not None + args = f.argument_factory(arguments, keywords, keywords_w, None, None) + if w_self is None: + f.popvalue() # removes w_self, which is None + w_callable = f.popvalue() + w_result = f.space.call_args(w_callable, args) + rstack.resume_point("CALL_METHOD_KW", f, returns=w_result) f.pushvalue(w_result) Modified: pypy/branch/fast-forward/pypy/objspace/std/intobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/intobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/intobject.py Thu Sep 9 01:00:13 2010 @@ -245,34 +245,36 @@ def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval + if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT + try: + c = ovfcheck_lshift(a, b) + except OverflowError: + raise FailedToImplementArgs(space.w_OverflowError, + space.wrap("integer left shift")) + return wrapint(space, c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) - if a == 0 or b == 0: - return get_integer(space, w_int1) - if b >= LONG_BIT: + else: #b >= LONG_BIT + if a == 0: + return get_integer(space, w_int1) raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - try: - c = ovfcheck_lshift(a, b) - except 
OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer left shift")) - return wrapint(space, c) def rshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - if a == 0 or b == 0: - return get_integer(space, w_int1) - if b >= LONG_BIT: - if a < 0: - a = -1 - else: - a = 0 + if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) + if b < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + else: # b >= LONG_BIT + if a == 0: + return get_integer(space, w_int1) + if a < 0: + a = -1 + else: + a = 0 else: a = a >> b return wrapint(space, a) Modified: pypy/branch/fast-forward/pypy/objspace/std/itertype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/itertype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/itertype.py Thu Sep 9 01:00:13 2010 @@ -1,5 +1,6 @@ from pypy.interpreter import gateway from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.error import OperationError # ____________________________________________________________ @@ -8,6 +9,11 @@ XXX to do: remove this __reduce__ method and do a registration with copy_reg, instead. """ + + # cpython does not support pickling iterators but stackless python do + #msg = 'Pickling for iterators dissabled as cpython does not support it' + #raise OperationError(space.w_TypeError, space.wrap(msg)) + from pypy.objspace.std.iterobject import W_AbstractSeqIterObject assert isinstance(w_self, W_AbstractSeqIterObject) from pypy.interpreter.mixedmodule import MixedModule Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/model.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/model.py Thu Sep 9 01:00:13 2010 @@ -88,6 +88,8 @@ import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods + if config.objspace.usemodules.array: + import pypy.module.array # the set of implementation types self.typeorder = { @@ -140,6 +142,8 @@ # check if we missed implementations for implcls in _registered_implementations: + if hasattr(implcls, 'register'): + implcls.register(self.typeorder) assert (implcls in self.typeorder or implcls in self.imported_but_not_registered), ( "please add %r in StdTypeModel.typeorder" % (implcls,)) Modified: pypy/branch/fast-forward/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/objspace.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/objspace.py Thu Sep 9 01:00:13 2010 @@ -405,8 +405,8 @@ def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: return DescrOperation.getattr(self, w_obj, w_name) - # an optional shortcut for performance + w_type = self.type(w_obj) w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_callmethod.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_callmethod.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_callmethod.py Thu Sep 9 01:00:13 2010 @@ -106,6 +106,15 
@@ else: raise Exception("did not raise?") """ + + def test_kwargs(self): + exec """if 1: + class C(object): + def f(self, a): + return a + 2 + + assert C().f(a=3) == 5 + """ class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): Modified: pypy/branch/fast-forward/pypy/objspace/std/tupleobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/tupleobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/tupleobject.py Thu Sep 9 01:00:13 2010 @@ -11,7 +11,7 @@ class W_TupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef - _immutable_ = True + _immutable_fields_ = ['wrappeditems[*]'] def __init__(w_self, wrappeditems): make_sure_not_resized(wrappeditems) Modified: pypy/branch/fast-forward/pypy/rlib/debug.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/debug.py (original) +++ pypy/branch/fast-forward/pypy/rlib/debug.py Thu Sep 9 01:00:13 2010 @@ -255,11 +255,32 @@ class IntegerCanBeNegative(Exception): pass -def _check_nonneg(ann, bk): - from pypy.annotation.model import SomeInteger - s_nonneg = SomeInteger(nonneg=True) - if not s_nonneg.contains(ann): - raise IntegerCanBeNegative +class UnexpectedRUInt(Exception): + pass def check_nonneg(x): - check_annotation(x, _check_nonneg) + """Give a translation-time error if 'x' is not known to be non-negative. + To help debugging, this also gives a translation-time error if 'x' is + actually typed as an r_uint (in which case the call to check_nonneg() + is a bit strange and probably unexpected). + """ + assert type(x)(-1) < 0 # otherwise, 'x' is a r_uint or similar + assert x >= 0 + return x + +class Entry(ExtRegistryEntry): + _about_ = check_nonneg + + def compute_result_annotation(self, s_arg): + from pypy.annotation.model import SomeInteger + if isinstance(s_arg, SomeInteger) and s_arg.unsigned: + raise UnexpectedRUInt("check_nonneg() arg is a %s" % ( + s_arg.knowntype,)) + s_nonneg = SomeInteger(nonneg=True) + if not s_nonneg.contains(s_arg): + raise IntegerCanBeNegative + return s_arg + + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.inputarg(hop.args_r[0], arg=0) Modified: pypy/branch/fast-forward/pypy/rlib/jit.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/jit.py (original) +++ pypy/branch/fast-forward/pypy/rlib/jit.py Thu Sep 9 01:00:13 2010 @@ -253,8 +253,7 @@ def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, - can_inline=None, get_printable_location=None, - confirm_enter_jit=None): + get_printable_location=None, confirm_enter_jit=None): if greens is not None: self.greens = greens if reds is not None: @@ -270,7 +269,6 @@ self.get_jitcell_at = get_jitcell_at self.set_jitcell_at = set_jitcell_at self.get_printable_location = get_printable_location - self.can_inline = can_inline self.confirm_enter_jit = confirm_enter_jit def _freeze_(self): @@ -284,6 +282,10 @@ # special-cased by ExtRegistryEntry assert dict.fromkeys(livevars) == _self._alllivevars + def loop_header(self): + # special-cased by ExtRegistryEntry + pass + def _set_param(self, name, value): # special-cased by ExtRegistryEntry # (internal, must receive a constant 'name') @@ -323,11 +325,15 @@ # specifically for them. 
self.jit_merge_point = self.jit_merge_point self.can_enter_jit = self.can_enter_jit + self.loop_header = self.loop_header self._set_param = self._set_param class Entry(ExtEnterLeaveMarker): _about_ = (self.jit_merge_point, self.can_enter_jit) + class Entry(ExtLoopHeader): + _about_ = self.loop_header + class Entry(ExtSetParam): _about_ = self._set_param @@ -384,7 +390,6 @@ self.annotate_hook(driver.get_jitcell_at, driver.greens, **kwds_s) self.annotate_hook(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) - self.annotate_hook(driver.can_inline, driver.greens, **kwds_s) self.annotate_hook(driver.get_printable_location, driver.greens, **kwds_s) def annotate_hook(self, func, variables, args_s=[], **kwds_s): @@ -425,6 +430,23 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +class ExtLoopHeader(ExtRegistryEntry): + # Replace a call to myjitdriver.loop_header() + # with an operation jit_marker('loop_header', myjitdriver). + + def compute_result_annotation(self, **kwds_s): + from pypy.annotation import model as annmodel + return annmodel.s_None + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype + driver = self.instance.im_self + hop.exception_cannot_occur() + vlist = [hop.inputconst(lltype.Void, 'loop_header'), + hop.inputconst(lltype.Void, driver)] + return hop.genop('jit_marker', vlist, + resulttype=lltype.Void) + class ExtSetParam(ExtRegistryEntry): def compute_result_annotation(self, s_name, s_value): Modified: pypy/branch/fast-forward/pypy/rlib/objectmodel.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/objectmodel.py (original) +++ pypy/branch/fast-forward/pypy/rlib/objectmodel.py Thu Sep 9 01:00:13 2010 @@ -19,32 +19,80 @@ # def f(... # -class _AttachSpecialization(object): +class _Specialize(object): + def memo(self): + """ Specialize functions based on argument values. All arguments has + to be constant at the compile time. The whole function call is replaced + by a call result then. + """ + def decorated_func(func): + func._annspecialcase_ = 'specialize:memo' + return func + return decorated_func - def __init__(self, tag): - self.tag = tag + def arg(self, *args): + """ Specialize function based on values of given positions of arguments. + They must be compile-time constants in order to work. + + There will be a copy of provided function for each combination + of given arguments on positions in args (that can lead to + exponential behavior!). + """ + def decorated_func(func): + func._annspecialcase_ = 'specialize:arg' + self._wrap(args) + return func - def __call__(self, *args): - if not args: - args = "" - else: - args = "("+','.join([repr(arg) for arg in args]) +")" - specialcase = "specialize:%s%s" % (self.tag, args) - - def specialize_decorator(func): - "NOT_RPYTHON" - func._annspecialcase_ = specialcase + return decorated_func + + def argtype(self, *args): + """ Specialize function based on types of arguments on given positions. + + There will be a copy of provided function for each combination + of given arguments on positions in args (that can lead to + exponential behavior!). 
+ """ + def decorated_func(func): + func._annspecialcase_ = 'specialize:argtype' + self._wrap(args) return func - return specialize_decorator - -class _Specialize(object): + return decorated_func - def __getattr__(self, name): - return _AttachSpecialization(name) + def ll(self): + """ This is version of argtypes that cares about low-level types + (so it'll get additional copies for two different types of pointers + for example). Same warnings about exponential behavior apply. + """ + def decorated_func(func): + func._annspecialcase_ = 'specialize:ll' + return func + + return decorated_func + + def ll_and_arg(self, arg): + """ XXX what does that do? + """ + def decorated_func(func): + func._annspecialcase_ = 'specialize:ll_and_arg(%d)' % arg + return func + + return decorated_func + + def _wrap(self, args): + return "("+','.join([repr(arg) for arg in args]) +")" specialize = _Specialize() +def enforceargs(*args): + """ Decorate a function with forcing of RPython-level types on arguments. + None means no enforcing. + + XXX shouldn't we also add asserts in function body? + """ + def decorator(f): + f._annenforceargs_ = args + return f + return decorator + # ____________________________________________________________ class Symbolic(object): Modified: pypy/branch/fast-forward/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rarithmetic.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rarithmetic.py Thu Sep 9 01:00:13 2010 @@ -167,7 +167,7 @@ def widen(n): from pypy.rpython.lltypesystem import lltype if _should_widen_type(lltype.typeOf(n)): - return int(n) + return intmask(n) else: return n widen._annspecialcase_ = 'specialize:argtype(0)' Modified: pypy/branch/fast-forward/pypy/rlib/rlocale.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rlocale.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rlocale.py Thu Sep 9 01:00:13 2010 @@ -15,6 +15,12 @@ HAVE_LANGINFO = sys.platform != 'win32' HAVE_LIBINTL = sys.platform != 'win32' +if HAVE_LIBINTL: + try: + platform.verify_eci(ExternalCompilationInfo(includes=['libintl.h'])) + except platform.CompilationError: + HAVE_LIBINTL = False + class CConfig: includes = ['locale.h', 'limits.h'] Modified: pypy/branch/fast-forward/pypy/rlib/rmmap.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rmmap.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rmmap.py Thu Sep 9 01:00:13 2010 @@ -646,8 +646,14 @@ hintp = rffi.cast(PTR, hint.pos) res = c_mmap_safe(hintp, map_size, prot, flags, -1, 0) if res == rffi.cast(PTR, -1): - raise MemoryError - hint.pos += map_size + # some systems (some versions of OS/X?) complain if they + # are passed a non-zero address. Try again. 
+ hintp = rffi.cast(PTR, 0) + res = c_mmap_safe(hintp, map_size, prot, flags, -1, 0) + if res == rffi.cast(PTR, -1): + raise MemoryError + else: + hint.pos += map_size return res alloc._annenforceargs_ = (int,) Modified: pypy/branch/fast-forward/pypy/rlib/rposix.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rposix.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rposix.py Thu Sep 9 01:00:13 2010 @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem import lltype, ll2ctypes, rffi from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import specialize class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -42,3 +43,102 @@ os.close(fd) except OSError: pass + +#___________________________________________________________________ +# Wrappers around posix functions, that accept either strings, or +# instances with a "as_bytes()" method. +# - pypy.modules.posix.interp_posix passes an object containing a unicode path +# which can encode itself with sys.filesystemencoding. +# - but pypy.rpython.module.ll_os.py on Windows will replace these functions +# with other wrappers that directly handle unicode strings. + at specialize.argtype(0) +def open(path, flags, mode): + if isinstance(path, str): + return os.open(path, flags, mode) + else: + return os.open(path.as_bytes(), flags, mode) + + at specialize.argtype(0) +def stat(path): + if isinstance(path, str): + return os.stat(path) + else: + return os.stat(path.as_bytes()) + + at specialize.argtype(0) +def lstat(path): + if isinstance(path, str): + return os.lstat(path) + else: + return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def unlink(path): + if isinstance(path, str): + return os.unlink(path) + else: + return os.unlink(path.as_bytes()) + + at specialize.argtype(0, 1) +def rename(path1, path2): + if isinstance(path1, str): + return os.rename(path1, path2) + else: + return os.rename(path1.as_bytes(), path2.as_bytes()) + + at specialize.argtype(0) +def listdir(dirname): + if isinstance(dirname, str): + return os.listdir(dirname) + else: + return os.listdir(dirname.as_bytes()) + + at specialize.argtype(0) +def access(path, mode): + if isinstance(path, str): + return os.access(path, mode) + else: + return os.access(path.as_bytes(), mode) + + at specialize.argtype(0) +def chmod(path, mode): + if isinstance(path, str): + return os.chmod(path, mode) + else: + return os.chmod(path.as_bytes(), mode) + + at specialize.argtype(0, 1) +def utime(path, times): + if isinstance(path, str): + return os.utime(path, times) + else: + return os.utime(path.as_bytes(), times) + + at specialize.argtype(0) +def chdir(path): + if isinstance(path, str): + return os.chdir(path) + else: + return os.chdir(path.as_bytes()) + + at specialize.argtype(0) +def mkdir(path, mode=0777): + if isinstance(path, str): + return os.mkdir(path, mode) + else: + return os.mkdir(path.as_bytes(), mode) + + at specialize.argtype(0) +def rmdir(path): + if isinstance(path, str): + return os.rmdir(path) + else: + return os.rmdir(path.as_bytes()) + +if os.name == 'nt': + import nt + def _getfullpathname(path): + if isinstance(path, str): + return nt._getfullpathname(path) + else: + return nt._getfullpathname(path.as_bytes()) Modified: pypy/branch/fast-forward/pypy/rlib/rsha.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/rlib/rsha.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rsha.py Thu Sep 9 01:00:13 2010 @@ -88,7 +88,7 @@ 0xCA62C1D6L # (60 <= t <= 79) ] -unroll_f_K = unrolling_iterable(zip(f, K)) +unroll_f_K = unrolling_iterable(zip(f, map(r_uint, K))) if UNROLL_ALL: unroll_range_20 = unrolling_iterable(range(20)) Modified: pypy/branch/fast-forward/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rsocket.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rsocket.py Thu Sep 9 01:00:13 2010 @@ -100,8 +100,6 @@ def lock(self, TYPE=_c.sockaddr): """Return self.addr_p, cast as a pointer to TYPE. Must call unlock()! """ - if not (self.minlen <= self.addrlen <= self.maxlen): - raise RSocketError("invalid address") return rffi.cast(lltype.Ptr(TYPE), self.addr_p) lock._annspecialcase_ = 'specialize:ll' Modified: pypy/branch/fast-forward/pypy/rlib/rweakref.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rweakref.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rweakref.py Thu Sep 9 01:00:13 2010 @@ -78,7 +78,7 @@ return self.__class__, def method_get(self, s_key): - assert isinstance(s_key, annmodel.SomeString) + assert annmodel.SomeString(can_be_None=True).contains(s_key) return annmodel.SomeInstance(self.valueclassdef, can_be_None=True) def method_set(self, s_key, s_value): Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rwin32.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rwin32.py Thu Sep 9 01:00:13 2010 @@ -31,6 +31,7 @@ DWORD = rffi_platform.SimpleType("DWORD", rffi.UINT) BOOL = rffi_platform.SimpleType("BOOL", rffi.LONG) BYTE = rffi_platform.SimpleType("BYTE", rffi.UCHAR) + WCHAR = rffi_platform.SimpleType("WCHAR", rffi.UCHAR) INT = rffi_platform.SimpleType("INT", rffi.INT) LONG = rffi_platform.SimpleType("LONG", rffi.LONG) PLONG = rffi_platform.SimpleType("PLONG", rffi.LONGP) @@ -38,6 +39,8 @@ LPCVOID = rffi_platform.SimpleType("LPCVOID", rffi.VOIDP) LPSTR = rffi_platform.SimpleType("LPSTR", rffi.CCHARP) LPCSTR = rffi_platform.SimpleType("LPCSTR", rffi.CCHARP) + LPWSTR = rffi_platform.SimpleType("LPWSTR", rffi.CWCHARP) + LPCWSTR = rffi_platform.SimpleType("LPCWSTR", rffi.CWCHARP) LPDWORD = rffi_platform.SimpleType("LPDWORD", rffi.INTP) SIZE_T = rffi_platform.SimpleType("SIZE_T", rffi.SIZE_T) ULONG_PTR = rffi_platform.SimpleType("ULONG_PTR", rffi.ULONG) @@ -87,6 +90,10 @@ GetLastError = winexternal('GetLastError', [], DWORD) SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + # In tests, the first call to GetLastError is always wrong, because error + # is hidden by operations in ll2ctypes. Call it now. 
+ GetLastError() + LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], rffi.VOIDP) GetProcAddress = winexternal('GetProcAddress', [rffi.VOIDP, rffi.CCHARP], @@ -129,13 +136,29 @@ } return 0; }''') - exename = static_platform.compile( - [cfile], ExternalCompilationInfo(), - outputfilename = "dosmaperr", - standalone=True) - output = os.popen(str(exename)) - errors = dict(map(int, line.split()) - for line in output) + try: + exename = static_platform.compile( + [cfile], ExternalCompilationInfo(), + outputfilename = "dosmaperr", + standalone=True) + except WindowsError: + # Fallback for the mingw32 compiler + errors = { + 2: 2, 3: 2, 4: 24, 5: 13, 6: 9, 7: 12, 8: 12, 9: 12, 10: 7, + 11: 8, 15: 2, 16: 13, 17: 18, 18: 2, 19: 13, 20: 13, 21: 13, + 22: 13, 23: 13, 24: 13, 25: 13, 26: 13, 27: 13, 28: 13, + 29: 13, 30: 13, 31: 13, 32: 13, 33: 13, 34: 13, 35: 13, + 36: 13, 53: 2, 65: 13, 67: 2, 80: 17, 82: 13, 83: 13, 89: 11, + 108: 13, 109: 32, 112: 28, 114: 9, 128: 10, 129: 10, 130: 9, + 132: 13, 145: 41, 158: 13, 161: 2, 164: 11, 167: 13, 183: 17, + 188: 8, 189: 8, 190: 8, 191: 8, 192: 8, 193: 8, 194: 8, + 195: 8, 196: 8, 197: 8, 198: 8, 199: 8, 200: 8, 201: 8, + 202: 8, 206: 2, 215: 11, 1816: 12, + } + else: + output = os.popen(str(exename)) + errors = dict(map(int, line.split()) + for line in output) return errors, errno.EINVAL # A bit like strerror... Modified: pypy/branch/fast-forward/pypy/rlib/rzipfile.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rzipfile.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rzipfile.py Thu Sep 9 01:00:13 2010 @@ -19,12 +19,12 @@ def crc32(s, crc=0): result = 0 - crc = ~r_uint(crc) & 0xffffffffL + crc = ~r_uint(crc) & r_uint(0xffffffffL) for c in s: crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) #/* Note: (crc >> 8) MUST zero fill on left - result = crc ^ 0xffffffffL + result = crc ^ r_uint(0xffffffffL) return result @@ -194,7 +194,7 @@ (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, crc, x.compress_size, x.file_size) = centdir[1:12] - x.CRC = r_uint(crc) & 0xffffffff + x.CRC = r_uint(crc) & r_uint(0xffffffff) x.dostime = t x.dosdate = d x.volume, x.internal_attr, x.external_attr = centdir[15:18] Modified: pypy/branch/fast-forward/pypy/rlib/streamio.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/streamio.py (original) +++ pypy/branch/fast-forward/pypy/rlib/streamio.py Thu Sep 9 01:00:13 2010 @@ -38,7 +38,9 @@ # import os, sys +from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import r_longlong, intmask +from pypy.rlib import rposix from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC O_BINARY = getattr(os, "O_BINARY", 0) @@ -71,6 +73,7 @@ return s.join(string.split(c)) + at specialize.argtype(0) def open_file_as_stream(path, mode="r", buffering=-1): os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) stream = open_path_helper(path, os_flags, basemode == "a") @@ -89,9 +92,10 @@ return construct_stream_tower(stream, buffering, universal, reading, writing, binary) + at specialize.argtype(0) def open_path_helper(path, os_flags, append): # XXX for now always return DiskFile - fd = os.open(path, os_flags, 0666) + fd = rposix.open(path, os_flags, 0666) if append: try: os.lseek(fd, 0, 2) Modified: pypy/branch/fast-forward/pypy/rlib/test/test_jit.py 
============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_jit.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_jit.py Thu Sep 9 01:00:13 2010 @@ -59,11 +59,9 @@ def test_annotate_hooks(self): - def can_inline(m): pass def get_printable_location(m): pass myjitdriver = JitDriver(greens=['m'], reds=['n'], - can_inline=can_inline, get_printable_location=get_printable_location) def fn(n): m = 42.5 @@ -81,9 +79,8 @@ return [v.concretetype for v in graph.getargs()] raise Exception, 'function %r has not been annotated' % func - can_inline_args = getargs(can_inline) get_printable_location_args = getargs(get_printable_location) - assert can_inline_args == get_printable_location_args == [lltype.Float] + assert get_printable_location_args == [lltype.Float] def test_annotate_argumenterror(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) Modified: pypy/branch/fast-forward/pypy/rlib/test/test_objectmodel.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_objectmodel.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_objectmodel.py Thu Sep 9 01:00:13 2010 @@ -404,6 +404,13 @@ assert f._annspecialcase_ == 'specialize:arg(1)' +def test_enforceargs_decorator(): + @enforceargs(int, str, None) + def f(a, b, c): + pass + + assert f._annenforceargs_ == (int, str, None) + def getgraph(f, argtypes): from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.all import backend_optimizations Modified: pypy/branch/fast-forward/pypy/rpython/extfunc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/extfunc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/extfunc.py Thu Sep 9 01:00:13 2010 @@ -52,7 +52,10 @@ return super(ExtRegistryEntry, self).__getattr__(attr) raise exc, exc_inst, tb -def registering(func): +def registering(func, condition=True): + if not condition: + return lambda method: None + def decorator(method): method._registering_func = func return method @@ -63,11 +66,9 @@ func = getattr(ns, name) except AttributeError: condition = False + func = None - if condition: - return registering(func) - else: - return lambda method: None + return registering(func, condition=condition) class LazyRegisteringMeta(type): def __new__(self, _name, _type, _vars): @@ -167,8 +168,6 @@ return signature_args def compute_result_annotation(self, *args_s): - if hasattr(self, 'ann_hook'): - self.ann_hook() self.normalize_args(*args_s) # check arguments return self.signature_result @@ -235,7 +234,6 @@ def register_external(function, args, result=None, export_name=None, llimpl=None, ooimpl=None, llfakeimpl=None, oofakeimpl=None, - annotation_hook=None, sandboxsafe=False): """ function: the RPython function that will be rendered as an external function (e.g.: math.floor) @@ -244,7 +242,6 @@ export_name: the name of the function as it will be seen by the backends llimpl, ooimpl: optional; if provided, these RPython functions are called instead of the target function llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter - annotationhook: optional; a callable that is called during annotation, useful for genc hacks sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ @@ -271,8 +268,6 @@ lltypefakeimpl = staticmethod(llfakeimpl) if oofakeimpl: ootypefakeimpl = staticmethod(oofakeimpl) 
- if annotation_hook: - ann_hook = staticmethod(annotation_hook) if export_name: FunEntry.__name__ = export_name Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py Thu Sep 9 01:00:13 2010 @@ -24,6 +24,7 @@ from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.rpython import raddress from pypy.translator.platform import platform +from array import array def uaddressof(obj): return fixid(ctypes.addressof(obj)) @@ -756,7 +757,15 @@ elif T is lltype.Char: llobj = chr(cobj) elif T is lltype.UniChar: - llobj = unichr(cobj) + try: + llobj = unichr(cobj) + except (ValueError, OverflowError): + for tc in 'HIL': + if array(tc).itemsize == array('u').itemsize: + llobj = array('u', array(tc, (cobj,)).tostring())[0] + break + else: + raise elif T is lltype.Signed: llobj = cobj elif T is lltype.Bool: Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llgroup.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llgroup.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llgroup.py Thu Sep 9 01:00:13 2010 @@ -99,6 +99,7 @@ '&~0xFFFF' or with a direct masking like '&0x10000' (resp. on 64-bit platform, with '&~0xFFFFFFFF' or '&0x100000000'). """ + __slots__ = ['lowpart', 'rest'] MASK = (1<= HALFSHIFT + return self.rest >> other + def __eq__(self, other): if (isinstance(other, CombinedSymbolic) and self.lowpart is other.lowpart): Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py Thu Sep 9 01:00:13 2010 @@ -361,19 +361,27 @@ # ____________________________________________________________ +def _sizeof_none(TYPE): + assert not TYPE._is_varsize() + return ItemOffset(TYPE) +_sizeof_none._annspecialcase_ = 'specialize:memo' + +def _sizeof_int(TYPE, n): + "NOT_RPYTHON" + if isinstance(TYPE, lltype.Struct): + return FieldOffset(TYPE, TYPE._arrayfld) + \ + itemoffsetof(TYPE._flds[TYPE._arrayfld], n) + else: + raise Exception("don't know how to take the size of a %r"%TYPE) + def sizeof(TYPE, n=None): if n is None: - assert not TYPE._is_varsize() - return ItemOffset(TYPE) + return _sizeof_none(TYPE) + elif isinstance(TYPE, lltype.Array): + return itemoffsetof(TYPE) + _sizeof_none(TYPE.OF) * n else: - if isinstance(TYPE, lltype.Array): - return itemoffsetof(TYPE, n) - elif isinstance(TYPE, lltype.Struct): - return FieldOffset(TYPE, TYPE._arrayfld) + \ - itemoffsetof(TYPE._flds[TYPE._arrayfld], n) - else: - raise Exception("don't know how to take the size of a %r"%TYPE) -sizeof._annspecialcase_ = 'specialize:memo' # only for n == None + return _sizeof_int(TYPE, n) +sizeof._annspecialcase_ = 'specialize:arg(0)' def offsetof(TYPE, fldname): assert fldname in TYPE._flds @@ -389,6 +397,7 @@ # ------------------------------------------------------------- class fakeaddress(object): + __slots__ = ['ptr'] # NOTE: the 'ptr' in the addresses must be normalized. # Use cast_ptr_to_adr() instead of directly fakeaddress() if unsure. 
def __init__(self, ptr): @@ -530,7 +539,6 @@ pass NULL = fakeaddress(None) -NULL.intaddress = 0 # this is to make memory.lladdress more happy Address = lltype.Primitive("Address", NULL) # GCREF is similar to Address but it is GC-aware Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py Thu Sep 9 01:00:13 2010 @@ -85,16 +85,20 @@ fold = roproperty(get_fold_impl) def is_pure(self, args_v): - return (self.canfold or # canfold => pure operation - self is llop.debug_assert or # debug_assert is pure enough - # reading from immutable - (self in (llop.getfield, llop.getarrayitem) and - args_v[0].concretetype.TO._hints.get('immutable')) or - (self is llop.getfield and # reading from immutable_field - 'immutable_fields' in args_v[0].concretetype.TO._hints and - args_v[1].value in args_v[0].concretetype.TO - ._hints['immutable_fields'].fields)) - # XXX: what about ootype immutable arrays? + if self.canfold: # canfold => pure operation + return True + if self is llop.debug_assert: # debug_assert is pure enough + return True + # reading from immutable (lltype) + if self is llop.getfield or self is llop.getarrayitem: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype.TO._immutable_field(field) + # reading from immutable (ootype) (xxx what about arrays?) + if self is llop.oogetfield: + field = getattr(args_v[1], 'value', None) + return args_v[0].concretetype._immutable_field(field) + # default + return False def __repr__(self): return '' % (getattr(self, 'opname', '?'),) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py Thu Sep 9 01:00:13 2010 @@ -37,7 +37,7 @@ return ''%(self.TYPE,) -def saferecursive(func, defl): +def saferecursive(func, defl, TLS=TLS): def safe(*args): try: seeing = TLS.seeing @@ -54,7 +54,7 @@ return safe #safe_equal = saferecursive(operator.eq, True) -def safe_equal(x, y): +def safe_equal(x, y, TLS=TLS): # a specialized version for performance try: seeing = TLS.seeing_eq @@ -97,7 +97,7 @@ raise TypeError return value - def __hash__(self): + def __hash__(self, TLS=TLS): # cannot use saferecursive() -- see test_lltype.test_hash(). # NB. the __cached_hash should neither be used nor updated # if we enter with hash_level > 0, because the computed @@ -297,6 +297,15 @@ n = 1 return _struct(self, n, initialization='example') + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) + class RttiStruct(Struct): _runtime_type_info = None @@ -391,6 +400,9 @@ def _container_example(self): return _array(self, 1, initialization='example') + def _immutable_field(self, index=None): + return self._hints.get('immutable', False) + class GcArray(Array): _gckind = 'gc' def _inline_is_varsize(self, last): @@ -401,6 +413,19 @@ # behaves more or less like a Struct with fields item0, item1, ... # but also supports __getitem__(), __setitem__(), __len__(). 
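The lloperation and lltype changes above replace the scattered checks of the 'immutable' / 'immutable_fields' hints with a single query, TYPE._immutable_field(name), which is what is_pure() above and the getfield/getarrayitem folding in opimpl.py below now call. A rough sketch of how the new protocol answers (FieldListAccessor here is a local stand-in for rclass.FieldListAccessor, mirroring the tests further down):

    from pypy.rpython.lltypesystem import lltype

    class FieldListAccessor(object):
        def __init__(self, fields):
            self.fields = fields       # maps field name -> '' or '[*]'

    S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed),
                        hints={'immutable_fields': FieldListAccessor({'x': ''})})
    assert S._immutable_field('x') == True    # listed with '' -> plain immutable
    assert S._immutable_field('y') == False   # not listed, no global hint either
    A = lltype.GcArray(lltype.Signed, hints={'immutable': True})
    assert A._immutable_field() == True       # arrays only know the global hint
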
+ _cache = weakref.WeakValueDictionary() # cache the length-1 FixedSizeArrays + def __new__(cls, OF, length, **kwds): + if length == 1 and not kwds: + try: + obj = FixedSizeArray._cache[OF] + except KeyError: + obj = FixedSizeArray._cache[OF] = Struct.__new__(cls) + except TypeError: + obj = Struct.__new__(cls) + else: + obj = Struct.__new__(cls) + return obj + def __init__(self, OF, length, **kwds): fields = [('item%d' % i, OF) for i in range(length)] super(FixedSizeArray, self).__init__('array%d' % length, *fields, @@ -610,11 +635,22 @@ class Ptr(LowLevelType): __name__ = property(lambda self: '%sPtr' % self.TO.__name__) - def __init__(self, TO): + _cache = weakref.WeakValueDictionary() # cache the Ptrs + def __new__(cls, TO, use_cache=True): if not isinstance(TO, ContainerType): raise TypeError, ("can only point to a Container type, " "not to %s" % (TO,)) - self.TO = TO + if not use_cache: + obj = LowLevelType.__new__(cls) + else: + try: + return Ptr._cache[TO] + except KeyError: + obj = Ptr._cache[TO] = LowLevelType.__new__(cls) + except TypeError: + obj = LowLevelType.__new__(cls) + obj.TO = TO + return obj def _needsgc(self): # XXX deprecated interface Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py Thu Sep 9 01:00:13 2010 @@ -150,12 +150,7 @@ # we can constant-fold this if the innermost structure from which we # read the final field is immutable. T = lltype.typeOf(innermostcontainer).TO - if T._hints.get('immutable'): - pass - elif ('immutable_fields' in T._hints and - offsets[-1] in T._hints['immutable_fields'].fields): - pass - else: + if not T._immutable_field(offsets[-1]): raise TypeError("cannot fold getinteriorfield on mutable struct") assert not isinstance(ob, lltype._interior_ptr) return ob @@ -197,6 +192,18 @@ assert isinstance(y, int) return intmask(x - y) +def op_int_ge(x, y): + # special case for 'AddressOffset >= 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x >= y + +def op_int_lt(x, y): + # special case for 'AddressOffset < 0' + assert isinstance(x, (int, llmemory.AddressOffset)) + assert isinstance(y, int) + return x < y + def op_int_between(a, b, c): assert lltype.typeOf(a) is lltype.Signed assert lltype.typeOf(b) is lltype.Signed @@ -222,6 +229,13 @@ assert isinstance(y, (int, llmemory.AddressOffset)) return intmask(x * y) +def op_int_rshift(x, y): + if not isinstance(x, int): + from pypy.rpython.lltypesystem import llgroup + assert isinstance(x, llgroup.CombinedSymbolic) + assert isinstance(y, int) + return x >> y + def op_int_floordiv(x, y): assert isinstance(x, (int, llmemory.AddressOffset)) assert isinstance(y, (int, llmemory.AddressOffset)) @@ -418,19 +432,15 @@ def op_getfield(p, name): checkptr(p) TYPE = lltype.typeOf(p).TO - if TYPE._hints.get('immutable'): - pass - elif ('immutable_fields' in TYPE._hints and - name in TYPE._hints['immutable_fields'].fields): - pass - else: + if not TYPE._immutable_field(name): raise TypeError("cannot fold getfield on mutable struct") return getattr(p, name) def op_getarrayitem(p, index): checkptr(p) - if not lltype.typeOf(p).TO._hints.get('immutable'): - raise TypeError("cannot fold getfield on mutable array") + ARRAY = lltype.typeOf(p).TO + if not ARRAY._immutable_field(index): + raise TypeError("cannot fold getarrayitem on mutable 
array") return p[index] def _normalize(x): Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py Thu Sep 9 01:00:13 2010 @@ -176,6 +176,15 @@ # XXX leaks if a str2charp() fails with MemoryError # and was not the first in this function freeme = arg + elif TARGET == CWCHARP: + if arg is None: + arg = lltype.nullptr(CWCHARP.TO) # None => (wchar_t*)NULL + freeme = arg + elif isinstance(arg, unicode): + arg = unicode2wcharp(arg) + # XXX leaks if a unicode2wcharp() fails with MemoryError + # and was not the first in this function + freeme = arg elif _isfunctype(TARGET) and not _isllptr(arg): # XXX pass additional arguments if invoke_around_handlers: @@ -584,9 +593,12 @@ """ str -> char* """ array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw') - for i in range(len(s)): + i = len(s) + array[i] = lastchar + i -= 1 + while i >= 0: array[i] = s[i] - array[len(s)] = lastchar + i -= 1 return array str2charp._annenforceargs_ = [strtype] Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py Thu Sep 9 01:00:13 2010 @@ -2,7 +2,7 @@ from pypy.tool.pairtype import pairtype from pypy.rpython.error import TyperError from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated -from pypy.rlib.objectmodel import _hash_string +from pypy.rlib.objectmodel import _hash_string, enforceargs from pypy.rlib.debug import ll_assert from pypy.rlib.jit import purefunction from pypy.rpython.robject import PyObjRepr, pyobj_repr @@ -56,6 +56,7 @@ llmemory.itemoffsetof(TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) + @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): assert srcstart >= 0 assert dststart >= 0 @@ -674,6 +675,7 @@ res_index += item_len i += 1 return result + ll_join_strs._annenforceargs_ = [int, None] def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced by string builder Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_llgroup.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_llgroup.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_llgroup.py Thu Sep 9 01:00:13 2010 @@ -105,6 +105,8 @@ assert p == test.p1b assert cslist[0] & ~MASK == 0x45 << HALFSHIFT assert cslist[1] & ~MASK == 0x41 << HALFSHIFT + assert cslist[0] >> HALFSHIFT == 0x45 + assert cslist[1] >> (HALFSHIFT+1) == 0x41 >> 1 # return 42 return f Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lloperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lloperation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lloperation.py Thu Sep 9 01:00:13 2010 @@ -88,7 +88,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) v_s3 = Variable() 
v_s3.concretetype = lltype.Ptr(S3) assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) @@ -103,7 +103,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lltype.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lltype.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_lltype.py Thu Sep 9 01:00:13 2010 @@ -781,6 +781,28 @@ p = cast_opaque_ptr(llmemory.GCREF, a) assert hash1 == identityhash(p) +def test_immutable_hint(): + S = GcStruct('S', ('x', lltype.Signed)) + assert S._immutable_field('x') == False + # + S = GcStruct('S', ('x', lltype.Signed), hints={'immutable': True}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' + + class TestTrackAllocation: def setup_method(self, func): start_tracking_allocations() Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_rffi.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_rffi.py Thu Sep 9 01:00:13 2010 @@ -186,6 +186,11 @@ def test_externvar(self): import os + if os.name == 'nt': + # Windows CRT badly aborts when an invalid fd is used. + bad_fd = 0 + else: + bad_fd = 12312312 def f(): set_errno(12) @@ -193,7 +198,7 @@ def g(): try: - os.write(12312312, "xxx") + os.write(bad_fd, "xxx") except OSError: pass return get_errno() Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py Thu Sep 9 01:00:13 2010 @@ -86,8 +86,7 @@ addr -= self.gcheaderbuilder.size_gc_header return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - def get_size(self, obj): - typeid = self.get_type_id(obj) + def _get_size_for_typeid(self, obj, typeid): size = self.fixed_size(typeid) if self.is_varsize(typeid): lenaddr = obj + self.varsize_offset_to_length(typeid) @@ -99,6 +98,9 @@ # gctypelayout.encode_type_shape() return size + def get_size(self, obj): + return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. 
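The enforceargs() decorator used on copy_string_contents() above is the declarative spelling of the _annenforceargs_ attribute that ll_join_strs still sets by hand: judging from test_enforceargs_decorator, it simply records the given types (None meaning "leave this argument unconstrained") for the annotator to apply later. A small sketch with a made-up function name:

    from pypy.rlib.objectmodel import enforceargs

    @enforceargs(None, None, int, int, int)   # None = no constraint on that arg
    def copy_slice(src, dst, srcstart, dststart, length):
        pass

    # equivalent, per the test above, to setting the attribute directly:
    assert copy_slice._annenforceargs_ == (None, None, int, int, int)
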
@@ -218,7 +220,6 @@ pending = self._debug_pending while pending.non_empty(): obj = pending.pop() - self.debug_check_object(obj) self.trace(obj, self._debug_callback2, None) self._debug_seen.delete() self._debug_pending.delete() @@ -227,6 +228,7 @@ seen = self._debug_seen if not seen.contains(obj): seen.add(obj) + self.debug_check_object(obj) self._debug_pending.append(obj) def _debug_callback(self, root): obj = root.address[0] @@ -348,3 +350,23 @@ globals(), locals(), [classname]) GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS + +def read_from_env(varname): + import os + value = os.environ.get(varname) + if value: + realvalue = value[:-1] + if value[-1] in 'kK': + factor = 1024 + elif value[-1] in 'mM': + factor = 1024*1024 + elif value[-1] in 'gG': + factor = 1024*1024*1024 + else: + factor = 1 + realvalue = value + try: + return int(float(realvalue) * factor) + except ValueError: + pass + return -1 Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py Thu Sep 9 01:00:13 2010 @@ -2,6 +2,7 @@ from pypy.rpython.memory.gc.semispace import SemiSpaceGC from pypy.rpython.memory.gc.semispace import GCFLAG_EXTERNAL, GCFLAG_FORWARDED from pypy.rpython.memory.gc.semispace import GC_HASH_TAKEN_ADDR +from pypy.rpython.memory.gc.base import read_from_env from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage from pypy.rpython.lltypesystem import lltype, llmemory, llarena from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -625,18 +626,7 @@ import os def nursery_size_from_env(): - value = os.environ.get('PYPY_GENERATIONGC_NURSERY') - if value: - if value[-1] in 'kK': - factor = 1024 - value = value[:-1] - else: - factor = 1 - try: - return int(value) * factor - except ValueError: - pass - return -1 + return read_from_env('PYPY_GENERATIONGC_NURSERY') def best_nursery_size_for_L2cache(L2cache): # Heuristically, the best nursery size to choose is about half Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py Thu Sep 9 01:00:13 2010 @@ -1,26 +1,17 @@ - -import time - from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup -from pypy.rpython.memory.gc.base import MovingGCBase -from pypy.rlib.debug import ll_assert +from pypy.rpython.memory.gc.base import MovingGCBase, read_from_env +from pypy.rlib.debug import ll_assert, have_debug_prints +from pypy.rlib.debug import debug_print, debug_start, debug_stop from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage -from pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, running_on_llinterp from pypy.rpython.lltypesystem import rffi from pypy.rpython.memory.gcheader import GCHeaderBuilder -first_gcflag = 1 << 
16 -GCFLAG_MARKBIT = first_gcflag << 0 -GCFLAG_HASHTAKEN = first_gcflag << 1 # someone already asked for the hash -GCFLAG_HASHFIELD = first_gcflag << 2 # we have an extra hash field - -memoryError = MemoryError() - # Mark'n'compact garbage collector # # main point of this GC is to save as much memory as possible @@ -33,41 +24,44 @@ # this gc works more or less like semispace, but has some essential # differencies. The main difference is that we have separate phases of # marking and assigning pointers, hence order of objects is preserved. -# This means we can reuse the same space if it did not grow enough. -# More importantly, in case we need to resize space we can copy it bit by -# bit, hence avoiding double memory consumption at peak times +# This means we can reuse the same space, overwriting it as we collect. -# so the algorithm itself is performed in 3 stages (module weakrefs and -# finalizers) +# so the algorithm itself is performed in 3 stages (modulo weakrefs and +# finalizers): # 1. We mark alive objects # 2. We walk all objects and assign forward pointers in the same order, # also updating all references -# 3. We compact the space by moving. In case we move to the same space, -# we use arena_new_view trick, which looks like new space to tests, -# but compiles to the same pointer. Also we use raw_memmove in case -# objects overlap. - -# Exact algorithm for space resizing: we keep allocated more space than needed -# (2x, can be even more), but it's full of zeroes. After each collection, -# we bump next_collect_after which is a marker where to start each collection. -# It should be exponential (but less than 2) from the size occupied by objects +# 3. We compact the space by moving. We use 'arena_new_view' trick, which +# looks like new space to tests, but compiles to the same pointer. +# Also we use raw_memmove in case the object overlaps with its destination. + +# After each collection, we bump 'next_collect_after' which is a marker +# where to start each collection. It should be exponential (but less +# than 2) from the size occupied by objects so far. # field optimization - we don't need forward pointer and flags at the same -# time. Instead we copy list of tids when we know how many objects are alive -# and store forward pointer there. +# time. Instead we copy the TIDs in a list when we know how many objects are +# alive, and store the forward pointer in the old object header. +first_gcflag_bit = LONG_BIT//2 +first_gcflag = 1 << first_gcflag_bit +GCFLAG_HASHTAKEN = first_gcflag << 0 # someone already asked for the hash +GCFLAG_HASHFIELD = first_gcflag << 1 # we have an extra hash field +# note that only the first 2 bits are preserved during a collection! 
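Numerically, the new header layout on a 32-bit build puts the 16-bit type id in the lower half of the word and the GC flags in the upper half; the mark bit defined just below this point lands on the sign bit, which is why it goes through intmask(). Spelled out for LONG_BIT == 32 (a 64-bit build shifts everything up accordingly):

    LONG_BIT = 32
    first_gcflag_bit = LONG_BIT // 2                     # 16
    first_gcflag = 1 << first_gcflag_bit                 # 0x00010000
    GCFLAG_HASHTAKEN = first_gcflag << 0                 # 0x00010000
    GCFLAG_HASHFIELD = first_gcflag << 1                 # 0x00020000
    GCFLAG_MARKBIT = first_gcflag << (LONG_BIT//2 - 1)   # 0x80000000, the sign bit
    # the "first 2 bits" that survive a collection are the shifted-down copies:
    assert GCFLAG_HASHTAKEN >> first_gcflag_bit == 1     # GCFLAG_SAVED_HASHTAKEN
    assert GCFLAG_HASHFIELD >> first_gcflag_bit == 2     # GCFLAG_SAVED_HASHFIELD
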
+GCFLAG_MARKBIT = intmask(first_gcflag << (LONG_BIT//2-1)) +assert GCFLAG_MARKBIT < 0 # should be 0x80000000 + +GCFLAG_SAVED_HASHTAKEN = GCFLAG_HASHTAKEN >> first_gcflag_bit +GCFLAG_SAVED_HASHFIELD = GCFLAG_HASHFIELD >> first_gcflag_bit -# in case we need to grow space, we use -# current_space_size * FREE_SPACE_MULTIPLIER / FREE_SPACE_DIVIDER + needed -FREE_SPACE_MULTIPLIER = 3 -FREE_SPACE_DIVIDER = 2 -FREE_SPACE_ADD = 256 -# XXX adjust -GC_CLEARANCE = 32*1024 TID_TYPE = llgroup.HALFWORD BYTES_PER_TID = rffi.sizeof(TID_TYPE) +TID_BACKUP = rffi.CArray(TID_TYPE) + +def translated_to_c(): + return we_are_translated() and not running_on_llinterp class MarkCompactGC(MovingGCBase): @@ -76,38 +70,63 @@ withhash_flag_is_in_field = 'tid', GCFLAG_HASHFIELD # ^^^ all prebuilt objects have GCFLAG_HASHTAKEN, but only some have # GCFLAG_HASHFIELD (and then they are one word longer). - TID_BACKUP = lltype.Array(TID_TYPE, hints={'nolength':True}) - WEAKREF_OFFSETS = lltype.Array(lltype.Signed) + # The default space size is 1.9375 GB, i.e. almost 2 GB, allocated as + # a big mmap. The process does not actually consume that space until + # needed, of course. + TRANSLATION_PARAMS = {'space_size': int((1 + 15.0/16)*1024*1024*1024), + 'min_next_collect_after': 16*1024*1024} # 16MB - TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust - - malloc_zero_filled = True + malloc_zero_filled = False inline_simple_malloc = True inline_simple_malloc_varsize = True - first_unused_gcflag = first_gcflag << 3 - total_collection_time = 0.0 - total_collection_count = 0 - - def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096): - import py; py.test.skip("Disabled for now, sorry") - self.param_space_size = space_size + #total_collection_time = 0.0 + #total_collection_count = 0 + + free = NULL + next_collect_after = -1 + + def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096, + min_next_collect_after=128): MovingGCBase.__init__(self, config, chunk_size) + self.space_size = space_size + self.min_next_collect_after = min_next_collect_after - def setup(self): - self.space_size = self.param_space_size - self.next_collect_after = self.param_space_size/2 # whatever... + def next_collection(self, used_space, num_objects_so_far, requested_size): + used_space += BYTES_PER_TID * num_objects_so_far + ll_assert(used_space <= self.space_size, + "used_space + num_objects_so_far overflow") + try: + next = (used_space // 3) * 2 + requested_size + except OverflowError: + next = self.space_size + if next < self.min_next_collect_after: + next = self.min_next_collect_after + if next > self.space_size - used_space: + next = self.space_size - used_space + # The value we return guarantees that used_space + next <= space_size, + # with 'BYTES_PER_TID*num_objects_so_far' included in used_space. + # Normally, the value we return should also be at least requested_size + # unless we are out of memory. 
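As a worked example of the clamping described above, with round illustrative numbers (the translated defaults are about 1.9 GB and 16 MB): take a 1 GB space, the 16 MB minimum, 100 MB of surviving data (already including the backed-up TIDs) and a 4096-byte triggering allocation:

    used_space = 100 * 1024 * 1024
    next = (used_space // 3) * 2 + 4096        # about 66.7 MB
    next = max(next, 16 * 1024 * 1024)         # already above the 16 MB floor
    next = min(next, (1 << 30) - used_space)   # and below the ~924 MB still free
    # so roughly another 67 MB of allocation happens before the next collection
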
+ return next - if self.config.gcconfig.debugprint: - self.program_start_time = time.time() - self.space = llarena.arena_malloc(self.space_size, True) - ll_assert(bool(self.space), "couldn't allocate arena") + def setup(self): + envsize = read_from_env('PYPY_MARKCOMPACTGC_MAX') + if envsize >= 4096: + self.space_size = envsize & ~4095 + mincollect = read_from_env('PYPY_MARKCOMPACTGC_MIN') + if mincollect >= 4096: + self.min_next_collect_after = mincollect + + #self.program_start_time = time.time() + self.space = llarena.arena_malloc(self.space_size, False) + if not self.space: + raise CannotAllocateGCArena self.free = self.space - self.top_of_space = self.space + self.next_collect_after MovingGCBase.setup(self) self.objects_with_finalizers = self.AddressDeque() - self.objects_with_weakrefs = self.AddressStack() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.next_collect_after = self.next_collection(0, 0, 0) def init_gc_object(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) @@ -115,219 +134,204 @@ def init_gc_object_immortal(self, addr, typeid16, flags=0): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - flags |= GCFLAG_HASHTAKEN + flags |= GCFLAG_HASHTAKEN | GCFLAG_MARKBIT + # All prebuilt GC objects have the GCFLAG_MARKBIT always set. + # That's convenient to make the GC always think that they + # survive the current collection. hdr.tid = self.combine(typeid16, flags) - # XXX we can store forward_ptr to itself, if we fix C backend - # so that get_forwarding_address(obj) returns - # obj itself if obj is a prebuilt object - def malloc_fixedsize_clear(self, typeid16, size, can_collect, - has_finalizer=False, contains_weakptr=False): - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + size - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) + def _get_memory(self, totalsize): + # also counts the space that will be needed during the following + # collection to store the TID + requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID + self.next_collect_after -= requested_size + if self.next_collect_after < 0: + result = self.obtain_free_space(requested_size) + else: + result = self.free + self.free += totalsize llarena.arena_reserve(result, totalsize) + return result + _get_memory._always_inline_ = True + + def _get_totalsize_var(self, nonvarsize, itemsize, length): + try: + varsize = ovfcheck(itemsize * length) + except OverflowError: + raise MemoryError + # Careful to detect overflows. The following works even if varsize + # is almost equal to sys.maxint; morever, self.space_size is known + # to be at least 4095 bytes smaller than sys.maxint, so this function + # always raises instead of returning an integer >= sys.maxint-4095. 
+ if (raw_malloc_usage(varsize) > self.space_size - + raw_malloc_usage(nonvarsize)): + raise MemoryError + return llarena.round_up_for_allocation(nonvarsize + varsize) + _get_totalsize_var._always_inline_ = True + + def _setup_object(self, result, typeid16, has_finalizer): + size_gc_header = self.gcheaderbuilder.size_gc_header self.init_gc_object(result, typeid16) - self.free += totalsize if has_finalizer: self.objects_with_finalizers.append(result + size_gc_header) - if contains_weakptr: - self.objects_with_weakrefs.append(result + size_gc_header) return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) - + _setup_object._always_inline_ = True + + def malloc_fixedsize(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + return self._setup_object(result, typeid16, has_finalizer) + + def malloc_fixedsize_clear(self, typeid16, size, can_collect, + has_finalizer=False, contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) + return self._setup_object(result, typeid16, has_finalizer) + def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length, can_collect): size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise memoryError - result = self.free - if raw_malloc_usage(totalsize) > self.top_of_space - result: - result = self.obtain_free_space(totalsize) - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid16) + totalsize = self._get_totalsize_var(nonvarsize, itemsize, length) + result = self._get_memory(totalsize) + llmemory.raw_memclear(result, totalsize) (result + size_gc_header + offset_to_length).signed[0] = length - self.free = result + llarena.round_up_for_allocation(totalsize) - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return self._setup_object(result, typeid16, False) - def obtain_free_space(self, totalsize): - # a bit of tweaking to maximize the performance and minimize the - # amount of code in an inlined version of malloc_fixedsize_clear() - if not self.try_obtain_free_space(totalsize): - raise memoryError + def obtain_free_space(self, requested_size): + if self.free == NULL: + return self._emergency_initial_block(requested_size) + while True: + executed_some_finalizers = self.markcompactcollect(requested_size) + self.next_collect_after -= requested_size + if self.next_collect_after >= 0: + break # ok + else: + if executed_some_finalizers: + pass # try again to do a collection + else: + raise MemoryError return self.free obtain_free_space._dont_inline_ = True - def try_obtain_free_space(self, needed): - needed = raw_malloc_usage(needed) - while 1: - self.markcompactcollect(needed) - missing = needed - (self.top_of_space - self.free) - if missing < 0: - return True - - def new_space_size(self, occupied, needed): - res = (occupied * FREE_SPACE_MULTIPLIER / - FREE_SPACE_DIVIDER + FREE_SPACE_ADD + needed) - # align it to 4096, which is somewhat around page size - return ((res/4096) + 1) * 4096 - - def double_space_size(self, minimal_size): - while self.space_size <= minimal_size: - self.space_size *= 2 - toaddr = 
llarena.arena_malloc(self.space_size, True) - return toaddr - - def compute_alive_objects(self): - fromaddr = self.space - addraftercollect = self.space - num = 1 - while fromaddr < self.free: - size_gc_header = self.gcheaderbuilder.size_gc_header - tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid - obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - objtotalsize = size_gc_header + objsize - if self.marked(obj): - copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or - ((tid & GCFLAG_HASHTAKEN) != 0 and - addraftercollect < fromaddr)) - addraftercollect += raw_malloc_usage(objtotalsize) - if copy_has_hash_field: - addraftercollect += llmemory.sizeof(lltype.Signed) - num += 1 - fromaddr += objtotalsize - if tid & GCFLAG_HASHFIELD: - fromaddr += llmemory.sizeof(lltype.Signed) - ll_assert(addraftercollect <= fromaddr, - "markcompactcollect() is trying to increase memory usage") - self.totalsize_of_objs = addraftercollect - self.space - return num + def _emergency_initial_block(self, requested_size): + # xxx before the GC is fully setup, we might get there. Hopefully + # we will only allocate a couple of strings, e.g. in read_from_env(). + # Just allocate them raw and leak them. + debug_start("gc-initial-block") + debug_print("leaking", requested_size, "bytes") + debug_stop("gc-initial-block") + return llmemory.raw_malloc(requested_size) def collect(self, gen=0): self.markcompactcollect() - - def markcompactcollect(self, needed=0): - start_time = self.debug_collect_start() + + def markcompactcollect(self, requested_size=0): + self.debug_collect_start(requested_size) self.debug_check_consistency() - self.to_see = self.AddressStack() - self.mark_roots_recursively() - if (self.objects_with_finalizers.non_empty() or - self.run_finalizers.non_empty()): - self.mark_objects_with_finalizers() - self._trace_and_mark() + # + # Mark alive objects + # + self.to_see = self.AddressDeque() + self.trace_from_roots() self.to_see.delete() - num_of_alive_objs = self.compute_alive_objects() - size_of_alive_objs = self.totalsize_of_objs - totalsize = self.new_space_size(size_of_alive_objs, needed + - num_of_alive_objs * BYTES_PER_TID) - tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) + - llmemory.sizeof(TID_TYPE) * num_of_alive_objs) - used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size) - if totalsize >= self.space_size or used_space_now >= self.space_size: - toaddr = self.double_space_size(totalsize) - llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - toaddr + size_of_alive_objs, - lltype.Ptr(self.TID_BACKUP)) - resizing = True - else: - toaddr = llarena.arena_new_view(self.space) - llarena.arena_reserve(self.top_of_space, tid_backup_size) - self.tid_backup = llmemory.cast_adr_to_ptr( - self.top_of_space, - lltype.Ptr(self.TID_BACKUP)) - resizing = False - self.next_collect_after = totalsize - weakref_offsets = self.collect_weakref_offsets() - finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs) + # + # Prepare new views on the same memory + # + toaddr = llarena.arena_new_view(self.space) + maxnum = self.space_size - (self.free - self.space) + maxnum /= BYTES_PER_TID + llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum)) + self.tid_backup = llmemory.cast_adr_to_ptr(self.free, + lltype.Ptr(TID_BACKUP)) + # + # Walk all objects and assign forward pointers in the same order, + # also updating all references + # + self.update_forward_pointers(toaddr, 
maxnum) if (self.run_finalizers.non_empty() or self.objects_with_finalizers.non_empty()): self.update_run_finalizers() - if self.objects_with_weakrefs.non_empty(): - self.invalidate_weakrefs(weakref_offsets) + self.update_objects_with_id() - self.compact(resizing) - if not resizing: - size = toaddr + self.space_size - finaladdr - llarena.arena_reset(finaladdr, size, True) - else: - if we_are_translated(): - # because we free stuff already in raw_memmove, we - # would get double free here. Let's free it anyway - llarena.arena_free(self.space) - llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size, - True) - self.space = toaddr - self.free = finaladdr - self.top_of_space = toaddr + self.next_collect_after + self.compact() + # + self.tid_backup = lltype.nullptr(TID_BACKUP) + self.free = self.finaladdr + self.next_collect_after = self.next_collection(self.finaladdr - toaddr, + self.num_alive_objs, + requested_size) + # + if not translated_to_c(): + remaining_size = (toaddr + self.space_size) - self.finaladdr + llarena.arena_reset(self.finaladdr, remaining_size, False) + llarena.arena_free(self.space) + self.space = toaddr + # self.debug_check_consistency() - self.tid_backup = lltype.nullptr(self.TID_BACKUP) + self.debug_collect_finish() + if self.next_collect_after < 0: + raise MemoryError + # if self.run_finalizers.non_empty(): self.execute_finalizers() - self.debug_collect_finish(start_time) - - def collect_weakref_offsets(self): - weakrefs = self.objects_with_weakrefs - new_weakrefs = self.AddressStack() - weakref_offsets = lltype.malloc(self.WEAKREF_OFFSETS, - weakrefs.length(), flavor='raw') - i = 0 - while weakrefs.non_empty(): - obj = weakrefs.pop() - offset = self.weakpointer_offset(self.get_type_id(obj)) - weakref_offsets[i] = offset - new_weakrefs.append(obj) - i += 1 - self.objects_with_weakrefs = new_weakrefs - weakrefs.delete() - return weakref_offsets - - def debug_collect_start(self): - if self.config.gcconfig.debugprint: - llop.debug_print(lltype.Void) - llop.debug_print(lltype.Void, - ".----------- Full collection ------------------") - start_time = time.time() - return start_time - - def debug_collect_finish(self, start_time): - if self.config.gcconfig.debugprint: - end_time = time.time() - elapsed_time = end_time - start_time - self.total_collection_time += elapsed_time - self.total_collection_count += 1 - total_program_time = end_time - self.program_start_time - ct = self.total_collection_time - cc = self.total_collection_count - llop.debug_print(lltype.Void, - "| number of collections so far ", - cc) - llop.debug_print(lltype.Void, - "| total collections per second: ", - cc / total_program_time) - llop.debug_print(lltype.Void, - "| total time in markcompact-collect: ", - ct, "seconds") - llop.debug_print(lltype.Void, - "| percentage collection<->total time:", - ct * 100.0 / total_program_time, "%") - llop.debug_print(lltype.Void, - "`----------------------------------------------") + return True # executed some finalizers + else: + return False # no finalizer executed + + def debug_collect_start(self, requested_size): + if 1:# have_debug_prints(): + debug_start("gc-collect") + debug_print() + debug_print(".----------- Full collection -------------------") + debug_print("| requested size:", + requested_size) + #start_time = time.time() + #return start_time + #return -1 + + def debug_collect_finish(self): + if 1:# start_time != -1: + #end_time = time.time() + #elapsed_time = end_time - start_time + #self.total_collection_time += elapsed_time + 
#self.total_collection_count += 1 + #total_program_time = end_time - self.program_start_time + #ct = self.total_collection_time + #cc = self.total_collection_count + #debug_print("| number of collections so far ", + # cc) + debug_print("| total space size ", + self.space_size) + debug_print("| number of objects alive ", + self.num_alive_objs) + debug_print("| used space size ", + self.free - self.space) + debug_print("| next collection after ", + self.next_collect_after) + #debug_print("| total collections per second: ", + # cc / total_program_time) + #debug_print("| total time in markcompact-collect: ", + # ct, "seconds") + #debug_print("| percentage collection<->total time:", + # ct * 100.0 / total_program_time, "%") + debug_print("`----------------------------------------------") + debug_stop("gc-collect") def update_run_finalizers(self): - run_finalizers = self.AddressDeque() - while self.run_finalizers.non_empty(): - obj = self.run_finalizers.popleft() - run_finalizers.append(self.get_forwarding_address(obj)) - self.run_finalizers.delete() - self.run_finalizers = run_finalizers + if self.run_finalizers.non_empty(): # uncommon case + run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + obj = self.run_finalizers.popleft() + run_finalizers.append(self.get_forwarding_address(obj)) + self.run_finalizers.delete() + self.run_finalizers = run_finalizers + # objects_with_finalizers = self.AddressDeque() while self.objects_with_finalizers.non_empty(): obj = self.objects_with_finalizers.popleft() @@ -356,90 +360,156 @@ tid = self.header(addr).tid return llop.extract_ushort(llgroup.HALFWORD, tid) - def mark_roots_recursively(self): + def trace_from_roots(self): self.root_walker.walk_roots( - MarkCompactGC._mark_root_recursively, # stack roots - MarkCompactGC._mark_root_recursively, # static in prebuilt non-gc structures - MarkCompactGC._mark_root_recursively) # static in prebuilt gc objects + MarkCompactGC._mark_root, # stack roots + MarkCompactGC._mark_root, # static in prebuilt non-gc structures + MarkCompactGC._mark_root) # static in prebuilt gc objects + if (self.objects_with_finalizers.non_empty() or + self.run_finalizers.non_empty()): + self.trace_from_objects_with_finalizers() self._trace_and_mark() def _trace_and_mark(self): - # XXX depth-first tracing... it can consume a lot of rawmalloced - # memory for very long stacks in some cases while self.to_see.non_empty(): - obj = self.to_see.pop() + obj = self.to_see.popleft() self.trace(obj, self._mark_obj, None) def _mark_obj(self, pointer, ignored): - obj = pointer.address[0] - if self.marked(obj): - return - self.mark(obj) - self.to_see.append(obj) + self.mark(pointer.address[0]) - def _mark_root_recursively(self, root): + def _mark_root(self, root): self.mark(root.address[0]) - self.to_see.append(root.address[0]) def mark(self, obj): - self.header(obj).tid |= GCFLAG_MARKBIT + if not self.marked(obj): + self.header(obj).tid |= GCFLAG_MARKBIT + self.to_see.append(obj) def marked(self, obj): - return self.header(obj).tid & GCFLAG_MARKBIT + # should work both if tid contains a CombinedSymbolic (for dying + # objects, at this point), or a plain integer. 
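The "plain integer" case mentioned in the comment above is the packed forwarding word that update_forward_pointers() below writes into surviving headers: objects are word-aligned, so the low two bits of the forwarding offset are free and carry the saved hash flags, while the sign bit doubles as the mark bit. A rough model with made-up helper names (get_header_forwarded_addr() further down undoes exactly this packing):

    GCFLAG_MARKBIT = 1 << 31     # sign bit on a 32-bit build, as above

    def pack_forwarding_word(old_tid, forward_offset, first_gcflag_bit=16):
        assert forward_offset & 3 == 0                    # alignment frees bits 0-1
        saved_flags = (old_tid >> first_gcflag_bit) & 3   # HASHTAKEN / HASHFIELD
        return forward_offset | saved_flags | GCFLAG_MARKBIT

    def unpack_forward_offset(packed_tid):
        return packed_tid & ~(GCFLAG_MARKBIT | 3)
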
+ return MovingGCBase.header(self, obj).tid & GCFLAG_MARKBIT + + def toaddr_smaller_than_fromaddr(self, toaddr, fromaddr): + if translated_to_c(): + return toaddr < fromaddr + else: + # convert the addresses to integers, because they are + # theoretically not from the same arena + return toaddr - self.base_forwarding_addr < fromaddr - self.space - def update_forward_pointers(self, toaddr, num_of_alive_objs): - self.base_forwarding_addr = toaddr + def update_forward_pointers(self, toaddr, maxnum): + self.base_forwarding_addr = base_forwarding_addr = toaddr fromaddr = self.space size_gc_header = self.gcheaderbuilder.size_gc_header - i = 0 + num = 0 while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - objsize = self.get_size(obj) - totalsize = size_gc_header + objsize - if not self.marked(obj): - self.set_null_forwarding_address(obj, i) - else: - llarena.arena_reserve(toaddr, totalsize) - self.set_forwarding_address(obj, toaddr, i) - toaddr += totalsize - i += 1 - fromaddr += totalsize + # compute the original object size, including the + # optional hash field + basesize = size_gc_header + self.get_size(obj) + totalsrcsize = basesize + if hdr.tid & GCFLAG_HASHFIELD: # already a hash field, copy it too + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + if self.marked(obj): + # the object is marked as suriving. Compute the new object + # size + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD) == + GCFLAG_HASHTAKEN): + # grow a new hash field -- with the exception: if + # the object actually doesn't move, don't + # (otherwise, we get a bogus toaddr > fromaddr) + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + totaldstsize += llmemory.sizeof(lltype.Signed) + # + if not translated_to_c(): + llarena.arena_reserve(toaddr, basesize) + if (raw_malloc_usage(totaldstsize) > + raw_malloc_usage(basesize)): + llarena.arena_reserve(toaddr + basesize, + llmemory.sizeof(lltype.Signed)) + # + # save the field hdr.tid in the array tid_backup + ll_assert(num < maxnum, "overflow of the tid_backup table") + self.tid_backup[num] = self.get_type_id(obj) + num += 1 + # compute forward_offset, the offset to the future copy + # of this object + forward_offset = toaddr - base_forwarding_addr + # copy the first two gc flags in forward_offset + ll_assert(forward_offset & 3 == 0, "misalignment!") + forward_offset |= (hdr.tid >> first_gcflag_bit) & 3 + hdr.tid = forward_offset | GCFLAG_MARKBIT + ll_assert(self.marked(obj), "re-marking object failed!") + # done + toaddr += totaldstsize + # + fromaddr += totalsrcsize + if not translated_to_c(): + assert toaddr - base_forwarding_addr <= fromaddr - self.space + self.num_alive_objs = num + self.finaladdr = toaddr # now update references self.root_walker.walk_roots( - MarkCompactGC._update_root, # stack roots - MarkCompactGC._update_root, # static in prebuilt non-gc structures - MarkCompactGC._update_root) # static in prebuilt gc objects + MarkCompactGC._update_ref, # stack roots + MarkCompactGC._update_ref, # static in prebuilt non-gc structures + MarkCompactGC._update_ref) # static in prebuilt gc objects + self.walk_marked_objects(MarkCompactGC.trace_and_update_ref) + + def walk_marked_objects(self, callback): + num = 0 + size_gc_header = self.gcheaderbuilder.size_gc_header fromaddr = self.space - i = 0 + toaddr = self.base_forwarding_addr while fromaddr < self.free: hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)) obj = fromaddr + size_gc_header - 
objsize = self.get_size_from_backup(obj, i) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - pass + survives = self.marked(obj) + if survives: + typeid = self.get_typeid_from_backup(num) + num += 1 else: - self.trace_with_backup(obj, self._update_ref, i) - fromaddr += totalsize - i += 1 - return toaddr + typeid = self.get_type_id(obj) + baseobjsize = self._get_size_for_typeid(obj, typeid) + basesize = size_gc_header + baseobjsize + totalsrcsize = basesize + # + if survives: + grow_hash_field = False + if hdr.tid & GCFLAG_SAVED_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + totaldstsize = totalsrcsize + if (hdr.tid & (GCFLAG_SAVED_HASHTAKEN|GCFLAG_SAVED_HASHFIELD) + == GCFLAG_SAVED_HASHTAKEN): + if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr): + grow_hash_field = True + totaldstsize += llmemory.sizeof(lltype.Signed) + callback(self, obj, typeid, basesize, toaddr, grow_hash_field) + toaddr += totaldstsize + else: + if hdr.tid & GCFLAG_HASHFIELD: + totalsrcsize += llmemory.sizeof(lltype.Signed) + # + fromaddr += totalsrcsize + walk_marked_objects._annspecialcase_ = 'specialize:arg(1)' - def trace_with_backup(self, obj, callback, arg): + def trace_and_update_ref(self, obj, typeid, _1, _2, _3): """Enumerate the locations inside the given obj that can contain GC pointers. For each such location, callback(pointer, arg) is called, where 'pointer' is an address inside the object. Typically, 'callback' is a bound method and 'arg' can be None. """ - typeid = self.get_typeid_from_backup(arg) if self.is_gcarrayofgcptr(typeid): # a performance shortcut for GcArray(gcptr) length = (obj + llmemory.gcarrayofptr_lengthoffset).signed[0] item = obj + llmemory.gcarrayofptr_itemsoffset while length > 0: - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) item += llmemory.gcarrayofptr_singleitemoffset length -= 1 return @@ -447,8 +517,7 @@ i = 0 while i < len(offsets): item = obj + offsets[i] - if self.points_to_valid_gc_object(item): - callback(item, arg) + self._update_ref(item) i += 1 if self.has_gcptr_in_varsize(typeid): item = obj + self.varsize_offset_to_variable_part(typeid) @@ -459,171 +528,122 @@ j = 0 while j < len(offsets): itemobj = item + offsets[j] - if self.points_to_valid_gc_object(itemobj): - callback(itemobj, arg) + self._update_ref(itemobj) j += 1 item += itemlength length -= 1 - trace_with_backup._annspecialcase_ = 'specialize:arg(2)' - - def _update_root(self, pointer): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) - - def _update_ref(self, pointer, ignore): - if pointer.address[0] != NULL: - pointer.address[0] = self.get_forwarding_address(pointer.address[0]) + else: + weakofs = self.weakpointer_offset(typeid) + if weakofs >= 0: + self._update_weakref(obj + weakofs) + + def _update_ref(self, pointer): + if self.points_to_valid_gc_object(pointer): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + + def _update_weakref(self, pointer): + # either update the weak pointer's destination, or + # if it dies, write a NULL + if self.points_to_valid_gc_object(pointer): + if self.marked(pointer.address[0]): + pointer.address[0] = self.get_forwarding_address( + pointer.address[0]) + else: + pointer.address[0] = NULL def _is_external(self, obj): - return not (self.space <= obj < self.top_of_space) + return not (self.space <= obj < self.free) def get_forwarding_address(self, obj): if self._is_external(obj): return obj return 
self.get_header_forwarded_addr(obj) - def set_null_forwarding_address(self, obj, num): - self.backup_typeid(num, obj) - hdr = self.header(obj) - hdr.tid = -1 # make the object forwarded to NULL - - def set_forwarding_address(self, obj, newobjhdr, num): - self.backup_typeid(num, obj) - forward_offset = newobjhdr - self.base_forwarding_addr - hdr = self.header(obj) - hdr.tid = forward_offset # make the object forwarded to newobj - - def restore_normal_header(self, obj, num): - # Reverse of set_forwarding_address(). - typeid16 = self.get_typeid_from_backup(num) - hdr = self.header_forwarded(obj) - hdr.tid = self.combine(typeid16, 0) # restore the normal header - def get_header_forwarded_addr(self, obj): - return (self.base_forwarding_addr + - self.header_forwarded(obj).tid + - self.gcheaderbuilder.size_gc_header) + tid = self.header_forwarded(obj).tid + ll_assert(tid & GCFLAG_MARKBIT != 0, "dying object is not forwarded") + GCFLAG_MASK = ~(GCFLAG_MARKBIT | 3) + res = (self.base_forwarding_addr + (tid & GCFLAG_MASK) + + self.gcheaderbuilder.size_gc_header) + ll_assert(res < self.finaladdr, "forwarded address >= self.finaladdr") + return res def surviving(self, obj): - return self._is_external(obj) or self.header_forwarded(obj).tid != -1 - - def backup_typeid(self, num, obj): - self.tid_backup[num] = self.get_type_id(obj) + return self.marked(obj) def get_typeid_from_backup(self, num): return self.tid_backup[num] - def get_size_from_backup(self, obj, num): - typeid = self.get_typeid_from_backup(num) - size = self.fixed_size(typeid) - if self.is_varsize(typeid): - lenaddr = obj + self.varsize_offset_to_length(typeid) - length = lenaddr.signed[0] - size += length * self.varsize_item_sizes(typeid) - size = llarena.round_up_for_allocation(size) - # XXX maybe we should parametrize round_up_for_allocation() - # per GC; if we do, we also need to fix the call in - # gctypelayout.encode_type_shape() - return size + def compact(self): + self.walk_marked_objects(MarkCompactGC.copy_and_compact) - def compact(self, resizing): - fromaddr = self.space - size_gc_header = self.gcheaderbuilder.size_gc_header - start = fromaddr - end = fromaddr - num = 0 - while fromaddr < self.free: - obj = fromaddr + size_gc_header - objsize = self.get_size_from_backup(obj, num) - totalsize = size_gc_header + objsize - if not self.surviving(obj): - # this object dies. Following line is a noop in C, - # we clear it to make debugging easier - llarena.arena_reset(fromaddr, totalsize, False) - else: - if resizing: - end = fromaddr - forward_obj = self.get_header_forwarded_addr(obj) - self.restore_normal_header(obj, num) - if obj != forward_obj: - #llop.debug_print(lltype.Void, "Copying from to", - # fromaddr, forward_ptr, totalsize) - llmemory.raw_memmove(fromaddr, - forward_obj - size_gc_header, - totalsize) - if resizing and end - start > GC_CLEARANCE: - diff = end - start - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - diff = (diff / GC_CLEARANCE) * GC_CLEARANCE - #llop.debug_print(lltype.Void, "Cleaning", start, diff) - end = start + diff - if we_are_translated(): - # XXX wuaaaaa.... 
those objects are freed incorrectly - # here in case of test_gc - llarena.arena_reset(start, diff, True) - start += diff - num += 1 - fromaddr += totalsize + def copy_and_compact(self, obj, typeid, basesize, toaddr, grow_hash_field): + # 'basesize' is the size without any hash field + # restore the normal header + hdr = self.header_forwarded(obj) + gcflags = hdr.tid & 3 + if grow_hash_field: + gcflags |= GCFLAG_SAVED_HASHFIELD + hashvalue = self.get_identityhash_from_addr(obj) + elif gcflags & GCFLAG_SAVED_HASHFIELD: + fromaddr = llarena.getfakearenaaddress(obj) + fromaddr -= self.gcheaderbuilder.size_gc_header + hashvalue = (fromaddr + basesize).signed[0] + else: + hashvalue = 0 # not used + # + hdr.tid = self.combine(typeid, gcflags << first_gcflag_bit) + # + fromaddr = obj - self.gcheaderbuilder.size_gc_header + if translated_to_c(): + llmemory.raw_memmove(fromaddr, toaddr, basesize) + else: + llmemory.raw_memcopy(fromaddr, toaddr, basesize) + # + if gcflags & GCFLAG_SAVED_HASHFIELD: + (toaddr + basesize).signed[0] = hashvalue def debug_check_object(self, obj): - # not sure what to check here - pass - - def mark_objects_with_finalizers(self): + type_id = self.get_type_id(obj) + self.has_gcptr_in_varsize(type_id) # checks that the type_id is valid + # + tid = self.header(obj).tid + if self._is_external(obj): + # All external objects have GCFLAG_MARKBIT and GCFLAG_HASHTAKEN + # set. + assert tid & GCFLAG_MARKBIT + assert tid & GCFLAG_HASHTAKEN + else: + # Non-external objects have GCFLAG_MARKBIT that should not be set + # at the very start or at the very end of a collection -- only + # temporarily during the collection. + assert tid & GCFLAG_MARKBIT == 0 + + def trace_from_objects_with_finalizers(self): + if self.run_finalizers.non_empty(): # uncommon case + new_run_finalizers = self.AddressDeque() + while self.run_finalizers.non_empty(): + x = self.run_finalizers.popleft() + self.mark(x) + new_run_finalizers.append(x) + self.run_finalizers.delete() + self.run_finalizers = new_run_finalizers + # + # xxx we get to run the finalizers in a random order + self._trace_and_mark() new_with_finalizers = self.AddressDeque() - run_finalizers = self.run_finalizers - new_run_finalizers = self.AddressDeque() - while run_finalizers.non_empty(): - x = run_finalizers.popleft() - self.mark(x) - self.to_see.append(x) - new_run_finalizers.append(x) - run_finalizers.delete() - self.run_finalizers = new_run_finalizers while self.objects_with_finalizers.non_empty(): x = self.objects_with_finalizers.popleft() if self.marked(x): new_with_finalizers.append(x) else: - new_run_finalizers.append(x) + self.run_finalizers.append(x) self.mark(x) - self.to_see.append(x) + self._trace_and_mark() self.objects_with_finalizers.delete() self.objects_with_finalizers = new_with_finalizers - def invalidate_weakrefs(self, weakref_offsets): - # walk over list of objects that contain weakrefs - # if the object it references survives then update the weakref - # otherwise invalidate the weakref - new_with_weakref = self.AddressStack() - i = 0 - while self.objects_with_weakrefs.non_empty(): - obj = self.objects_with_weakrefs.pop() - if not self.surviving(obj): - continue # weakref itself dies - newobj = self.get_forwarding_address(obj) - offset = weakref_offsets[i] - pointing_to = (obj + offset).address[0] - # XXX I think that pointing_to cannot be NULL here - if pointing_to: - if self.surviving(pointing_to): - (obj + offset).address[0] = self.get_forwarding_address( - pointing_to) - new_with_weakref.append(newobj) - else: - (obj + 
offset).address[0] = NULL - i += 1 - self.objects_with_weakrefs.delete() - self.objects_with_weakrefs = new_with_weakref - lltype.free(weakref_offsets, flavor='raw') - - def get_size_incl_hash(self, obj): - size = self.get_size(obj) - hdr = self.header(obj) - if hdr.tid & GCFLAG_HASHFIELD: - size += llmemory.sizeof(lltype.Signed) - return size - def identityhash(self, gcobj): # Unlike SemiSpaceGC.identityhash(), this function does not have # to care about reducing top_of_space. The reason is as @@ -638,8 +658,23 @@ hdr = self.header(obj) # if hdr.tid & GCFLAG_HASHFIELD: # the hash is in a field at the end - obj += self.get_size(obj) + obj = llarena.getfakearenaaddress(obj) + self.get_size(obj) return obj.signed[0] # hdr.tid |= GCFLAG_HASHTAKEN - return llmemory.cast_adr_to_int(obj) # direct case + return self.get_identityhash_from_addr(obj) + + def get_identityhash_from_addr(self, obj): + if translated_to_c(): + return llmemory.cast_adr_to_int(obj) # direct case + else: + try: + adr = llarena.getfakearenaaddress(obj) # -> arena address + except RuntimeError: + return llmemory.cast_adr_to_int(obj) # not in an arena... + return adr - self.space + +# ____________________________________________________________ + +class CannotAllocateGCArena(Exception): + pass Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py Thu Sep 9 01:00:13 2010 @@ -67,7 +67,10 @@ from pypy.config.pypyoption import get_pypy_config config = get_pypy_config(translating=True).translation self.stackroots = [] - self.gc = self.GCClass(config, **self.GC_PARAMS) + GC_PARAMS = self.GC_PARAMS.copy() + if hasattr(meth, 'GC_PARAMS'): + GC_PARAMS.update(meth.GC_PARAMS) + self.gc = self.GCClass(config, **GC_PARAMS) self.gc.DEBUG = True self.rootwalker = DirectRootWalker(self) self.gc.set_root_walker(self.rootwalker) @@ -96,7 +99,7 @@ p[index] = newvalue def malloc(self, TYPE, n=None): - addr = self.gc.malloc(self.get_type_id(TYPE), n) + addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) def test_simple(self): @@ -311,7 +314,18 @@ print hash assert isinstance(hash, (int, long)) assert hash == self.gc.identityhash(p_const) - + # (5) p is actually moving (for the markcompact gc) + p0 = self.malloc(S) + self.stackroots.append(p0) + p = self.malloc(S) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.stackroots.pop(-2) + self.gc.collect() # p0 goes away, p shifts left + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -431,3 +445,14 @@ class TestMarkCompactGC(DirectGCTest): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + def test_many_objects(self): + DirectGCTest.test_many_objects(self) + test_many_objects.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + def test_varsized_from_stack(self): + DirectGCTest.test_varsized_from_stack(self) + test_varsized_from_stack.GC_PARAMS = {'space_size': 2 * 1024 * WORD} + + def test_varsized_from_prebuilt_gc(self): + DirectGCTest.test_varsized_from_prebuilt_gc(self) + test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 
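
The test_direct.py hunk above lets an individual test method override the GC parameters by attaching a GC_PARAMS attribute to the test function; setup_method then merges it over the class-level defaults. A minimal sketch of that pattern (class and parameter names are illustrative, not the real test classes):

    class FakeGCTest:
        GC_PARAMS = {'space_size': 1024}        # class-level defaults

        def setup_method(self, meth):
            params = self.GC_PARAMS.copy()
            if hasattr(meth, 'GC_PARAMS'):       # per-test override, if any
                params.update(meth.GC_PARAMS)
            self.params = params

    class TestBigSpace(FakeGCTest):
        def test_many_objects(self):
            assert self.params['space_size'] == 3 * 1024
        test_many_objects.GC_PARAMS = {'space_size': 3 * 1024}

    t = TestBigSpace()
    t.setup_method(TestBigSpace.test_many_objects)
    t.test_many_objects()
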
* 1024 * WORD} Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctransform/asmgcroot.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gctransform/asmgcroot.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gctransform/asmgcroot.py Thu Sep 9 01:00:13 2010 @@ -18,6 +18,7 @@ # The .s file produced by GCC is then parsed by trackgcroot.py. # +IS_64_BITS = sys.maxint > 2147483647 class AsmGcRootFrameworkGCTransformer(FrameworkGCTransformer): _asmgcc_save_restore_arguments = None @@ -326,7 +327,7 @@ ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") return callee.regs_stored_at[reg] elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + 4 + esp_in_caller = callee.frame_address + sizeofaddr return esp_in_caller + offset elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] @@ -415,11 +416,12 @@ key1 = addr1.address[0] key2 = addr2.address[0] if key1 < key2: - return -1 + result = -1 elif key1 == key2: - return 0 + result = 0 else: - return 1 + result = 1 + return rffi.cast(rffi.INT, result) # ____________________________________________________________ @@ -464,9 +466,15 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # -CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers -INDEX_OF_EBP = 3 -FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array + +if IS_64_BITS: + CALLEE_SAVED_REGS = 6 + INDEX_OF_EBP = 5 + FRAME_PTR = CALLEE_SAVED_REGS +else: + CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers + INDEX_OF_EBP = 3 + FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py Thu Sep 9 01:00:13 2010 @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import ll_assert +from pypy.rlib.rarithmetic import intmask from pypy.tool.identity_dict import identity_dict @@ -44,16 +45,18 @@ self.type_info_group_ptr = type_info_group._as_ptr() def get(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info(res) + return res def get_varsize(self, typeid): - _check_typeid(typeid) - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group_ptr, - typeid) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group_ptr, + typeid) + _check_valid_type_info_varsize(res) + return res def q_is_varsize(self, typeid): infobits = self.get(typeid).infobits @@ -115,13 +118,24 @@ # the lowest 16bits are used to store group member index -T_MEMBER_INDEX = 0xffff +T_MEMBER_INDEX = 0xffff T_IS_VARSIZE = 0x10000 T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_KEY_MASK = intmask(0xFF000000) +T_KEY_VALUE = intmask(0x7A000000) # bug detection only -def _check_typeid(typeid): 
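
The gctypelayout change around here replaces the old _check_typeid with a stronger consistency check: a fixed key byte is OR'ed into infobits when a type is encoded, and every type-info lookup asserts that the key is still present, so a lookup that lands on garbage is caught immediately. A plain-Python sketch of the idea (the real code additionally passes the constants through intmask for 32-bit signedness):

    T_KEY_MASK  = 0xFF000000   # high byte of infobits reserved for the check
    T_KEY_VALUE = 0x7A000000   # arbitrary pattern, bug detection only

    def encode_infobits(flags):
        return flags | T_KEY_VALUE           # done once when the type is encoded

    def check_valid_type_info(infobits):
        assert infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id"

    good = encode_infobits(0x10000)          # 0x10000 plays the role of T_IS_VARSIZE
    check_valid_type_info(good)              # passes
    try:
        check_valid_type_info(0x10000)       # key byte missing: rejected
    except AssertionError:
        pass
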
+def _check_valid_type_info(p): + ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id") + +def _check_valid_type_info_varsize(p): + ll_assert(p.header.infobits & (T_KEY_MASK | T_IS_VARSIZE) == + (T_KEY_VALUE | T_IS_VARSIZE), + "invalid varsize type_id") + +def check_typeid(typeid): + # xxx does not perform a full check of validity, just checks for nonzero ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid), "invalid type_id") @@ -165,9 +179,9 @@ infobits |= T_HAS_GCPTR_IN_VARSIZE varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF) varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) - if TYPE == WEAKREF: + if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF - info.infobits = infobits + info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -250,17 +264,21 @@ _, TYPE = TYPE._first_struct() def get_info(self, type_id): - return llop.get_group_member(GCData.TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info(res) + return res def get_info_varsize(self, type_id): - return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, - self.type_info_group._as_ptr(), - type_id) + res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR, + self.type_info_group._as_ptr(), + type_id) + _check_valid_type_info_varsize(res) + return res - def is_weakref(self, type_id): - return self.get_info(type_id).infobits & T_IS_WEAKREF + def is_weakref_type(self, TYPE): + return TYPE == WEAKREF def encode_type_shapes_now(self): if not self.can_encode_type_shape: Modified: pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py Thu Sep 9 01:00:13 2010 @@ -119,6 +119,9 @@ else: return True + def pyobjectptr(self, klass): + raise NotImplementedError(klass) + # ____________________________________________________________ class LLInterpRootWalker: Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py Thu Sep 9 01:00:13 2010 @@ -639,12 +639,14 @@ class TestMarkCompactGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = {'space_size': 65536+16384} + GC_CAN_SHRINK_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - -class TestMarkCompactGCGrowing(TestMarkCompactGC): - GC_PARAMS = {'space_size': 16*WORD} + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_gctypelayout.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_gctypelayout.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_gctypelayout.py Thu Sep 9 01:00:13 2010 @@ -101,7 +101,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), 
hints={'immutable_fields': accessor}) - accessor.initialize(S3, ['x']) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py Thu Sep 9 01:00:13 2010 @@ -1138,15 +1138,16 @@ class TestMarkCompactGC(GenericMovingGCTests): gcname = 'markcompact' - def setup_class(cls): - py.test.skip("Disabled for now, sorry") - class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass - GC_PARAMS = {'space_size': 512*WORD} + GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 + def test_writebarrier_before_copy(self): + py.test.skip("Not relevant, and crashes because llarena does not " + "support empty GcStructs") + class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1536,3 +1537,12 @@ GC_PARAMS = {'space_size': 512*WORD, 'nursery_size': 32*WORD} root_stack_depth = 200 + +class TestMarkCompactTaggedpointerGC(TaggedPointerGCTests): + gcname = 'markcompact' + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass + GC_PARAMS = {'space_size': 4096*WORD} + root_stack_depth = 200 Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_os.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_os.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_os.py Thu Sep 9 01:00:13 2010 @@ -6,12 +6,15 @@ # might be found in doc/rffi.txt import os, sys, errno +import py from pypy.rpython.module.support import ll_strcpy, OOSupport -from pypy.tool.sourcetools import func_with_new_name +from pypy.tool.sourcetools import func_with_new_name, func_renamer from pypy.rlib.rarithmetic import r_longlong -from pypy.rpython.extfunc import BaseLazyRegistering +from pypy.rpython.extfunc import ( + BaseLazyRegistering, lazy_register, register_external) from pypy.rpython.extfunc import registering, registering_if, extdef -from pypy.annotation.model import SomeInteger, SomeString, SomeTuple, SomeFloat +from pypy.annotation.model import ( + SomeInteger, SomeString, SomeTuple, SomeFloat, SomeUnicodeString) from pypy.annotation.model import s_ImpossibleValue, s_None, s_Bool from pypy.rpython.lltypesystem import rffi from pypy.rpython.lltypesystem import lltype @@ -26,7 +29,99 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here +from pypy.rlib.objectmodel import keepalive_until_here, specialize + +def monkeypatch_rposix(posixfunc, unicodefunc, signature): + func_name = posixfunc.__name__ + + if hasattr(signature, '_default_signature_'): + signature = signature._default_signature_ + arglist = ['arg%d' % (i,) for i in range(len(signature))] + transformed_arglist = arglist[:] + for i, arg in enumerate(signature): + if arg is unicode: + transformed_arglist[i] = transformed_arglist[i] + '.as_unicode()' + + args = ', '.join(arglist) + transformed_args = ', '.join(transformed_arglist) + main_arg = 
'arg%d' % (signature.index(unicode),) + + source = py.code.Source(""" + def %(func_name)s(%(args)s): + if isinstance(%(main_arg)s, str): + return posixfunc(%(args)s) + else: + return unicodefunc(%(transformed_args)s) + """ % locals()) + miniglobals = {'posixfunc' : posixfunc, + 'unicodefunc': unicodefunc, + '__name__': __name__, # for module name propagation + } + exec source.compile() in miniglobals + new_func = miniglobals[func_name] + specialized_args = [i for i in range(len(signature)) + if signature[i] in (unicode, None)] + new_func = specialize.argtype(*specialized_args)(new_func) + + # Monkeypatch the function in pypy.rlib.rposix + setattr(rposix, func_name, new_func) + +class StringTraits: + str = str + CHAR = rffi.CHAR + CCHARP = rffi.CCHARP + charp2str = staticmethod(rffi.charp2str) + str2charp = staticmethod(rffi.str2charp) + free_charp = staticmethod(rffi.free_charp) + + @staticmethod + def posix_function_name(name): + return underscore_on_windows + name + + @staticmethod + def ll_os_name(name): + return 'll_os.ll_os_' + name + +class UnicodeTraits: + str = unicode + CHAR = rffi.WCHAR_T + CCHARP = rffi.CWCHARP + charp2str = staticmethod(rffi.wcharp2unicode) + str2charp = staticmethod(rffi.unicode2wcharp) + free_charp = staticmethod(rffi.free_wcharp) + + @staticmethod + def posix_function_name(name): + return underscore_on_windows + 'w' + name + + @staticmethod + def ll_os_name(name): + return 'll_os.ll_os_w' + name + +def registering_str_unicode(posixfunc, condition=True): + if not condition: + return registering(None, condition=False) + + func_name = posixfunc.__name__ + + def register_posixfunc(self, method): + val = method(self, StringTraits()) + register_external(posixfunc, *val.def_args, **val.def_kwds) + + if sys.platform == 'win32': + val = method(self, UnicodeTraits()) + @func_renamer(func_name + "_unicode") + def unicodefunc(*args): + return posixfunc(*args) + register_external(unicodefunc, *val.def_args, **val.def_kwds) + signature = val.def_args[0] + monkeypatch_rposix(posixfunc, unicodefunc, signature) + + def decorator(method): + decorated = lambda self: register_posixfunc(self, method) + decorated._registering_func = posixfunc + return decorated + return decorator posix = __import__(os.name) @@ -282,8 +377,8 @@ return extdef([int, int], s_None, llimpl=dup2_llimpl, export_name="ll_os.ll_os_dup2") - @registering(os.utime) - def register_os_utime(self): + @registering_str_unicode(os.utime) + def register_os_utime(self, traits): UTIMBUFP = lltype.Ptr(self.UTIMBUF) os_utime = self.llexternal('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT) @@ -336,6 +431,9 @@ # tp is known to be None, and one version where it is known # to be a tuple of 2 floats. 
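
The StringTraits/UnicodeTraits classes above let one registration helper produce both the narrow and the wide variant of a POSIX wrapper: everything that differs (the C char type, the conversion helpers, the exported names) is looked up on a traits object. A minimal non-RPython sketch of the pattern, with a hypothetical unlink factory in place of the real llexternal machinery:

    class StringTraits:
        str = str                            # narrow strings
        @staticmethod
        def posix_function_name(name):
            return '_' + name                # e.g. '_unlink' in the Windows CRT

    class UnicodeTraits:
        str = str                            # stands in for 'unicode' on Python 2
        @staticmethod
        def posix_function_name(name):
            return '_w' + name               # wide-char variant, e.g. '_wunlink'

    def make_unlink_impl(traits):
        cname = traits.posix_function_name('unlink')
        def unlink_llimpl(path):
            # the real code calls the C function named 'cname' through rffi
            return (cname, path)
        return unlink_llimpl

    assert make_unlink_impl(StringTraits())('a.txt') == ('_unlink', 'a.txt')
    assert make_unlink_impl(UnicodeTraits())(u'a.txt') == ('_wunlink', u'a.txt')
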
if not _WIN32: + assert traits.str is str + + @specialize.argtype(1) def os_utime_llimpl(path, tp): if tp is None: error = os_utime(path, lltype.nullptr(UTIMBUFP.TO)) @@ -346,85 +444,13 @@ if error == -1: raise OSError(rposix.get_errno(), "os_utime failed") else: - from pypy.rlib import rwin32 - from pypy.rpython.module.ll_os_stat import time_t_to_FILE_TIME - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['windows.h'], - ) - - FILE_WRITE_ATTRIBUTES = platform.ConstantInteger( - 'FILE_WRITE_ATTRIBUTES') - OPEN_EXISTING = platform.ConstantInteger( - 'OPEN_EXISTING') - FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger( - 'FILE_FLAG_BACKUP_SEMANTICS') - globals().update(platform.configure(CConfig)) - - CreateFile = rffi.llexternal( - 'CreateFileA', - [rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD, - rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD, - rwin32.HANDLE], - rwin32.HANDLE, - calling_conv='win') - - GetSystemTime = rffi.llexternal( - 'GetSystemTime', - [lltype.Ptr(rwin32.SYSTEMTIME)], - lltype.Void, - calling_conv='win') - - SystemTimeToFileTime = rffi.llexternal( - 'SystemTimeToFileTime', - [lltype.Ptr(rwin32.SYSTEMTIME), - lltype.Ptr(rwin32.FILETIME)], - rwin32.BOOL, - calling_conv='win') - - SetFileTime = rffi.llexternal( - 'SetFileTime', - [rwin32.HANDLE, - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME)], - rwin32.BOOL, - calling_conv = 'win') + from pypy.rpython.module.ll_win32file import make_utime_impl + os_utime_llimpl = make_utime_impl(traits) - def os_utime_llimpl(path, tp): - hFile = CreateFile(path, - FILE_WRITE_ATTRIBUTES, 0, - None, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, 0) - if hFile == rwin32.INVALID_HANDLE_VALUE: - raise rwin32.lastWindowsError() - ctime = lltype.nullptr(rwin32.FILETIME) - atime = lltype.malloc(rwin32.FILETIME, flavor='raw') - mtime = lltype.malloc(rwin32.FILETIME, flavor='raw') - try: - if tp is None: - now = lltype.malloc(rwin32.SYSTEMTIME, flavor='raw') - try: - GetSystemTime(now) - if (not SystemTimeToFileTime(now, atime) or - not SystemTimeToFileTime(now, mtime)): - raise rwin32.lastWindowsError() - finally: - lltype.free(now, flavor='raw') - else: - actime, modtime = tp - time_t_to_FILE_TIME(actime, atime) - time_t_to_FILE_TIME(modtime, mtime) - if not SetFileTime(hFile, ctime, atime, mtime): - raise rwin32.lastWindowsError() - finally: - rwin32.CloseHandle(hFile) - lltype.free(atime, flavor='raw') - lltype.free(mtime, flavor='raw') - os_utime_llimpl._annspecialcase_ = 'specialize:argtype(1)' - - s_string = SomeString() + if traits.str is str: + s_string = SomeString() + else: + s_string = SomeUnicodeString() s_tuple_of_2_floats = SomeTuple([SomeFloat(), SomeFloat()]) def os_utime_normalize_args(s_path, s_times): @@ -445,12 +471,12 @@ else: raise Exception("os.utime() arg 2 must be None or a tuple of " "2 floats, got %s" % (s_times,)) + os_utime_normalize_args._default_signature_ = [traits.str, None] return extdef(os_utime_normalize_args, s_None, "ll_os.ll_os_utime", llimpl=os_utime_llimpl) - @registering(os.times) def register_os_times(self): if sys.platform.startswith('win'): @@ -687,22 +713,21 @@ def register_os_setsid(self): return self.extdef_for_os_function_returning_int('setsid') - @registering(os.open) - def register_os_open(self): - os_open = self.llexternal(underscore_on_windows+'open', - [rffi.CCHARP, rffi.INT, rffi.MODE_T], + @registering_str_unicode(os.open) + def register_os_open(self, traits): + os_open = 
self.llexternal(traits.posix_function_name('open'), + [traits.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT) - def os_open_llimpl(path, flags, mode): result = rffi.cast(rffi.LONG, os_open(path, flags, mode)) if result == -1: raise OSError(rposix.get_errno(), "os_open failed") return result - def os_open_oofakeimpl(o_path, flags, mode): - return os.open(o_path._str, flags, mode) + def os_open_oofakeimpl(path, flags, mode): + return os.open(OOSupport.from_rstr(path), flags, mode) - return extdef([str, int, int], int, "ll_os.ll_os_open", + return extdef([traits.str, int, int], int, traits.ll_os_name('open'), llimpl=os_open_llimpl, oofakeimpl=os_open_oofakeimpl) # ------------------------------- os.read ------------------------------- @@ -862,10 +887,10 @@ llimpl=fdatasync_llimpl, export_name="ll_os.ll_os_fdatasync") - @registering(os.access) - def register_os_access(self): - os_access = self.llexternal(underscore_on_windows + 'access', - [rffi.CCHARP, rffi.INT], + @registering_str_unicode(os.access) + def register_os_access(self, traits): + os_access = self.llexternal(traits.posix_function_name('access'), + [traits.CCHARP, rffi.INT], rffi.INT) if sys.platform.startswith('win'): @@ -882,44 +907,22 @@ def os_access_oofakeimpl(path, mode): return os.access(OOSupport.from_rstr(path), mode) - return extdef([str, int], s_Bool, llimpl=access_llimpl, - export_name="ll_os.ll_os_access", + return extdef([traits.str, int], s_Bool, llimpl=access_llimpl, + export_name=traits.ll_os_name("access"), oofakeimpl=os_access_oofakeimpl) - @registering_if(posix, '_getfullpathname') - def register_posix__getfullpathname(self): - from pypy.rlib import rwin32 + @registering_str_unicode(getattr(posix, '_getfullpathname', None), + condition=sys.platform=='win32') + def register_posix__getfullpathname(self, traits): # this nt function is not exposed via os, but needed # to get a correct implementation of os.abspath - # XXX why do we ignore WINAPI conventions everywhere? - LPSTRP = rffi.CArrayPtr(rwin32.LPSTR) - # XXX unicode? 
- GetFullPathName = self.llexternal( - 'GetFullPathNameA', - [rwin32.LPCSTR, - rwin32.DWORD, - rwin32.LPSTR, - rffi.CArrayPtr(rwin32.LPSTR)], - rwin32.DWORD) - - def _getfullpathname_llimpl(lpFileName): - nBufferLength = rwin32.MAX_PATH + 1 - lpBuffer = lltype.malloc(rwin32.LPSTR.TO, nBufferLength, flavor='raw') - try: - res = GetFullPathName( - lpFileName, rffi.cast(rwin32.DWORD, nBufferLength), - lpBuffer, lltype.nullptr(LPSTRP.TO)) - if res == 0: - raise rwin32.lastWindowsError("_getfullpathname failed") - result = rffi.charp2str(lpBuffer) - return result - finally: - lltype.free(lpBuffer, flavor='raw') + from pypy.rpython.module.ll_win32file import make_getfullpathname_impl + getfullpathname_llimpl = make_getfullpathname_impl(traits) - return extdef([str], # a single argument which is a str - str, # returns a string - "ll_os.posix__getfullpathname", - llimpl=_getfullpathname_llimpl) + return extdef([traits.str], # a single argument which is a str + traits.str, # returns a string + traits.ll_os_name('_getfullpathname'), + llimpl=getfullpathname_llimpl) @registering(os.getcwd) def register_os_getcwd(self): @@ -953,71 +956,42 @@ "ll_os.ll_os_getcwd", llimpl=os_getcwd_llimpl, oofakeimpl=os_getcwd_oofakeimpl) - @registering(os.listdir) - def register_os_listdir(self): - # we need a different approach on Windows and on Posix - if sys.platform.startswith('win'): - from pypy.rlib import rwin32 - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['windows.h'] - ) - WIN32_FIND_DATA = platform.Struct('struct _WIN32_FIND_DATAA', - [('cFileName', lltype.FixedSizeArray(rffi.CHAR, 1))]) - ERROR_FILE_NOT_FOUND = platform.ConstantInteger( - 'ERROR_FILE_NOT_FOUND') - ERROR_NO_MORE_FILES = platform.ConstantInteger( - 'ERROR_NO_MORE_FILES') + @registering(os.getcwdu, condition=sys.platform=='win32') + def register_os_getcwdu(self): + os_wgetcwd = self.llexternal(underscore_on_windows + 'wgetcwd', + [rffi.CWCHARP, rffi.SIZE_T], + rffi.CWCHARP) - config = platform.configure(CConfig) - WIN32_FIND_DATA = config['WIN32_FIND_DATA'] - ERROR_FILE_NOT_FOUND = config['ERROR_FILE_NOT_FOUND'] - ERROR_NO_MORE_FILES = config['ERROR_NO_MORE_FILES'] - LPWIN32_FIND_DATA = lltype.Ptr(WIN32_FIND_DATA) - - FindFirstFile = self.llexternal('FindFirstFile', - [rwin32.LPCSTR, LPWIN32_FIND_DATA], - rwin32.HANDLE) - FindNextFile = self.llexternal('FindNextFile', - [rwin32.HANDLE, LPWIN32_FIND_DATA], - rwin32.BOOL) - FindClose = self.llexternal('FindClose', - [rwin32.HANDLE], - rwin32.BOOL) + def os_getcwd_llimpl(): + bufsize = 256 + while True: + buf = lltype.malloc(rffi.CWCHARP.TO, bufsize, flavor='raw') + res = os_wgetcwd(buf, rffi.cast(rffi.SIZE_T, bufsize)) + if res: + break # ok + error = rposix.get_errno() + lltype.free(buf, flavor='raw') + if error != errno.ERANGE: + raise OSError(error, "getcwd failed") + # else try again with a larger buffer, up to some sane limit + bufsize *= 4 + if bufsize > 1024*1024: # xxx hard-coded upper limit + raise OSError(error, "getcwd result too large") + result = rffi.wcharp2unicode(res) + lltype.free(buf, flavor='raw') + return result - def os_listdir_llimpl(path): - if path and path[-1] not in ('/', '\\', ':'): - path += '/' - path += '*.*' - filedata = lltype.malloc(WIN32_FIND_DATA, flavor='raw') - try: - result = [] - hFindFile = FindFirstFile(path, filedata) - if hFindFile == rwin32.INVALID_HANDLE_VALUE: - error = rwin32.GetLastError() - if error == ERROR_FILE_NOT_FOUND: - return result - else: - raise WindowsError(error, "FindFirstFile failed") - while True: - 
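
The getcwdu implementation above uses a grow-and-retry loop: start with a small buffer, and when the C call reports ERANGE free the buffer and retry with one four times larger, up to a hard-coded limit. A plain-Python sketch of that loop, with c_getcwd standing in for the real _wgetcwd foreign call:

    import errno

    def getcwd_with_retries(c_getcwd, limit=1024 * 1024):
        bufsize = 256
        while True:
            ok, value = c_getcwd(bufsize)        # value is the path or an errno
            if ok:
                return value
            if value != errno.ERANGE:
                raise OSError(value, "getcwd failed")
            bufsize *= 4                         # buffer too small: grow and retry
            if bufsize > limit:
                raise OSError(value, "getcwd result too large")

    # fake foreign call: pretends the cwd needs at least 1000 characters of buffer
    fake = lambda n: (True, u"/a/very/long/path") if n >= 1000 else (False, errno.ERANGE)
    assert getcwd_with_retries(fake) == u"/a/very/long/path"
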
name = rffi.charp2str(rffi.cast(rffi.CCHARP, - filedata.c_cFileName)) - if name != "." and name != "..": # skip these - result.append(name) - if not FindNextFile(hFindFile, filedata): - break - # FindNextFile sets error to ERROR_NO_MORE_FILES if - # it got to the end of the directory - error = rwin32.GetLastError() - FindClose(hFindFile) - if error == ERROR_NO_MORE_FILES: - return result - else: - raise WindowsError(error, "FindNextFile failed") - finally: - lltype.free(filedata, flavor='raw') + return extdef([], unicode, + "ll_os.ll_os_wgetcwd", llimpl=os_getcwd_llimpl) + @registering_str_unicode(os.listdir) + def register_os_listdir(self, traits): + # we need a different approach on Windows and on Posix + if sys.platform.startswith('win'): + from pypy.rpython.module.ll_win32file import make_listdir_impl + os_listdir_llimpl = make_listdir_impl(traits) else: + assert traits.str is str compilation_info = ExternalCompilationInfo( includes = ['sys/types.h', 'dirent.h'] ) @@ -1057,9 +1031,9 @@ raise OSError(error, "os_readdir failed") return result - return extdef([str], # a single argument which is a str - [str], # returns a list of strings - "ll_os.ll_os_listdir", + return extdef([traits.str], # a single argument which is a str + [traits.str], # returns a list of strings + traits.ll_os_name('listdir'), llimpl=os_listdir_llimpl) @registering(os.pipe) @@ -1234,38 +1208,40 @@ return extdef([str], int, llimpl=system_llimpl, export_name="ll_os.ll_os_system") - @registering(os.unlink) - def register_os_unlink(self): - os_unlink = self.llexternal(underscore_on_windows+'unlink', [rffi.CCHARP], rffi.INT) + @registering_str_unicode(os.unlink) + def register_os_unlink(self, traits): + os_unlink = self.llexternal(traits.posix_function_name('unlink'), + [traits.CCHARP], rffi.INT) def unlink_llimpl(pathname): res = rffi.cast(lltype.Signed, os_unlink(pathname)) if res < 0: raise OSError(rposix.get_errno(), "os_unlink failed") - return extdef([str], s_None, llimpl=unlink_llimpl, - export_name="ll_os.ll_os_unlink") + return extdef([traits.str], s_None, llimpl=unlink_llimpl, + export_name=traits.ll_os_name('unlink')) - @registering(os.chdir) - def register_os_chdir(self): - os_chdir = self.llexternal(underscore_on_windows+'chdir', [rffi.CCHARP], rffi.INT) + @registering_str_unicode(os.chdir) + def register_os_chdir(self, traits): + os_chdir = self.llexternal(traits.posix_function_name('chdir'), + [traits.CCHARP], rffi.INT) def chdir_llimpl(path): res = rffi.cast(lltype.Signed, os_chdir(path)) if res < 0: raise OSError(rposix.get_errno(), "os_chdir failed") - return extdef([str], s_None, llimpl=chdir_llimpl, - export_name="ll_os.ll_os_chdir") + return extdef([traits.str], s_None, llimpl=chdir_llimpl, + export_name=traits.ll_os_name('chdir')) - @registering(os.mkdir) - def register_os_mkdir(self): + @registering_str_unicode(os.mkdir) + def register_os_mkdir(self, traits): if os.name == 'nt': ARG2 = [] # no 'mode' argument on Windows - just ignored else: ARG2 = [rffi.MODE_T] - os_mkdir = self.llexternal(underscore_on_windows+'mkdir', - [rffi.CCHARP]+ARG2, rffi.INT) + os_mkdir = self.llexternal(traits.posix_function_name('mkdir'), + [traits.CCHARP] + ARG2, rffi.INT) IGNORE_MODE = len(ARG2) == 0 def mkdir_llimpl(pathname, mode): @@ -1277,46 +1253,47 @@ if res < 0: raise OSError(rposix.get_errno(), "os_mkdir failed") - return extdef([str, int], s_None, llimpl=mkdir_llimpl, - export_name="ll_os.ll_os_mkdir") + return extdef([traits.str, int], s_None, llimpl=mkdir_llimpl, + export_name=traits.ll_os_name('mkdir')) - 
@registering(os.rmdir) - def register_os_rmdir(self): - os_rmdir = self.llexternal(underscore_on_windows+'rmdir', [rffi.CCHARP], rffi.INT) + @registering_str_unicode(os.rmdir) + def register_os_rmdir(self, traits): + os_rmdir = self.llexternal(traits.posix_function_name('rmdir'), + [traits.CCHARP], rffi.INT) def rmdir_llimpl(pathname): res = rffi.cast(lltype.Signed, os_rmdir(pathname)) if res < 0: raise OSError(rposix.get_errno(), "os_rmdir failed") - return extdef([str], s_None, llimpl=rmdir_llimpl, - export_name="ll_os.ll_os_rmdir") + return extdef([traits.str], s_None, llimpl=rmdir_llimpl, + export_name=traits.ll_os_name('rmdir')) - @registering(os.chmod) - def register_os_chmod(self): - os_chmod = self.llexternal(underscore_on_windows+'chmod', [rffi.CCHARP, rffi.MODE_T], - rffi.INT) + @registering_str_unicode(os.chmod) + def register_os_chmod(self, traits): + os_chmod = self.llexternal(traits.posix_function_name('chmod'), + [traits.CCHARP, rffi.MODE_T], rffi.INT) def chmod_llimpl(path, mode): res = rffi.cast(lltype.Signed, os_chmod(path, rffi.cast(rffi.MODE_T, mode))) if res < 0: raise OSError(rposix.get_errno(), "os_chmod failed") - return extdef([str, int], s_None, llimpl=chmod_llimpl, - export_name="ll_os.ll_os_chmod") + return extdef([traits.str, int], s_None, llimpl=chmod_llimpl, + export_name=traits.ll_os_name('chmod')) - @registering(os.rename) - def register_os_rename(self): - os_rename = self.llexternal('rename', [rffi.CCHARP, rffi.CCHARP], - rffi.INT) + @registering_str_unicode(os.rename) + def register_os_rename(self, traits): + os_rename = self.llexternal(traits.posix_function_name('rename'), + [traits.CCHARP, traits.CCHARP], rffi.INT) def rename_llimpl(oldpath, newpath): res = rffi.cast(lltype.Signed, os_rename(oldpath, newpath)) if res < 0: raise OSError(rposix.get_errno(), "os_rename failed") - return extdef([str, str], s_None, llimpl=rename_llimpl, - export_name="ll_os.ll_os_rename") + return extdef([traits.str, traits.str], s_None, llimpl=rename_llimpl, + export_name=traits.ll_os_name('rename')) @registering(os.umask) def register_os_umask(self): @@ -1425,17 +1402,17 @@ @registering(os.fstat) def register_os_fstat(self): from pypy.rpython.module import ll_os_stat - ll_os_stat.register_stat_variant('fstat') + return ll_os_stat.register_stat_variant('fstat', StringTraits()) - @registering(os.stat) - def register_os_stat(self): + @registering_str_unicode(os.stat) + def register_os_stat(self, traits): from pypy.rpython.module import ll_os_stat - ll_os_stat.register_stat_variant('stat') + return ll_os_stat.register_stat_variant('stat', traits) - @registering(os.lstat) - def register_os_lstat(self): + @registering_str_unicode(os.lstat) + def register_os_lstat(self, traits): from pypy.rpython.module import ll_os_stat - ll_os_stat.register_stat_variant('lstat') + return ll_os_stat.register_stat_variant('lstat', traits) # ------------------------------- os.W* --------------------------------- Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py Thu Sep 9 01:00:13 2010 @@ -5,13 +5,14 @@ import os, sys from pypy.annotation import model as annmodel from pypy.tool.pairtype import pairtype -from pypy.tool.sourcetools import func_with_new_name +from pypy.tool.sourcetools import func_with_new_name, func_renamer from pypy.rpython import extregistry -from 
pypy.rpython.extfunc import register_external +from pypy.rpython.extfunc import register_external, extdef from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -211,13 +212,27 @@ return make_stat_result(result) -def register_stat_variant(name): - if sys.platform.startswith('win'): - _functions = {'stat': '_stati64', - 'fstat': '_fstati64', - 'lstat': '_stati64'} # no lstat on Windows - c_func_name = _functions[name] - elif sys.platform.startswith('linux'): +def register_stat_variant(name, traits): + if name != 'fstat': + arg_is_path = True + s_arg = traits.str + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + if sys.platform == 'win32': + # See Win32 implementation below + posix_stat_llimpl = make_win32_stat_impl(name, traits) + + return extdef( + [s_arg], s_StatResult, traits.ll_os_name(name), + llimpl=posix_stat_llimpl) + + assert traits.str is str + + if sys.platform.startswith('linux'): # because we always use _FILE_OFFSET_BITS 64 - this helps things work that are not a c compiler _functions = {'stat': 'stat64', 'fstat': 'fstat64', @@ -226,22 +241,26 @@ else: c_func_name = name - arg_is_path = (name != 'fstat') + posix_mystat = rffi.llexternal(c_func_name, + [ARG1, STAT_STRUCT], rffi.INT, + compilation_info=compilation_info) + @func_renamer('os_%s_llimpl' % (name,)) def posix_stat_llimpl(arg): stresult = lltype.malloc(STAT_STRUCT.TO, flavor='raw') try: if arg_is_path: - arg = rffi.str2charp(arg) + arg = traits.str2charp(arg) error = rffi.cast(rffi.LONG, posix_mystat(arg, stresult)) if arg_is_path: - rffi.free_charp(arg) + traits.free_charp(arg) if error != 0: raise OSError(rposix.get_errno(), "os_?stat failed") return build_stat_result(stresult) finally: lltype.free(stresult, flavor='raw') + @func_renamer('os_%s_fake' % (name,)) def posix_fakeimpl(arg): if s_arg == str: arg = hlstr(arg) @@ -259,40 +278,17 @@ setattr(ll_tup, 'item%d' % i, val) return ll_tup - if arg_is_path: - s_arg = str - ARG1 = rffi.CCHARP - else: - s_arg = int - ARG1 = rffi.INT + return extdef( + [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) - if sys.platform != 'win32': - posix_mystat = rffi.llexternal(c_func_name, - [ARG1, STAT_STRUCT], rffi.INT, - compilation_info=compilation_info) - - register_external( - getattr(os, name), [s_arg], s_StatResult, - "ll_os.ll_os_%s" % (name,), - llimpl=func_with_new_name(posix_stat_llimpl, - 'os_%s_llimpl' % (name,)), - llfakeimpl=func_with_new_name(posix_fakeimpl, - 'os_%s_fake' % (name,)), - ) - else: - # See Win32 implementation below - register_external( - getattr(os, name), [s_arg], s_StatResult, - "ll_os.ll_os_%s" % (name,), - llimpl=func_with_new_name(globals()['win32_%s_llimpl' % (name,)], - 'os_%s_llimpl' % (name,)), - ) +def make_win32_stat_impl(name, traits): + from pypy.rlib import rwin32 + from pypy.rpython.module.ll_win32file import make_win32_traits + win32traits = make_win32_traits(traits) -# ____________________________________________________________ -if sys.platform == 'win32': # The CRT of Windows has a number of flaws wrt. 
its stat() implementation: - # - for when we implement subsecond resolution in RPython, time stamps - # would be restricted to second resolution + # - time stamps are restricted to second resolution # - file modification times suffer from forth-and-back conversions between # UTC and local time # Therefore, we implement our own stat, based on the Win32 API directly. @@ -302,122 +298,18 @@ assert len(STAT_FIELDS) == 10 # no extra fields on Windows - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['windows.h', 'winbase.h', 'sys/stat.h'], - ) - - GetFileExInfoStandard = platform.ConstantInteger( - 'GetFileExInfoStandard') - FILE_ATTRIBUTE_DIRECTORY = platform.ConstantInteger( - 'FILE_ATTRIBUTE_DIRECTORY') - FILE_ATTRIBUTE_READONLY = platform.ConstantInteger( - 'FILE_ATTRIBUTE_READONLY') - ERROR_SHARING_VIOLATION = platform.ConstantInteger( - 'ERROR_SHARING_VIOLATION') - _S_IFDIR = platform.ConstantInteger('_S_IFDIR') - _S_IFREG = platform.ConstantInteger('_S_IFREG') - _S_IFCHR = platform.ConstantInteger('_S_IFCHR') - _S_IFIFO = platform.ConstantInteger('_S_IFIFO') - FILE_TYPE_UNKNOWN = platform.ConstantInteger('FILE_TYPE_UNKNOWN') - FILE_TYPE_CHAR = platform.ConstantInteger('FILE_TYPE_CHAR') - FILE_TYPE_PIPE = platform.ConstantInteger('FILE_TYPE_PIPE') - - WIN32_FILE_ATTRIBUTE_DATA = platform.Struct( - 'WIN32_FILE_ATTRIBUTE_DATA', - [('dwFileAttributes', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME)]) - - BY_HANDLE_FILE_INFORMATION = platform.Struct( - 'BY_HANDLE_FILE_INFORMATION', - [('dwFileAttributes', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('nNumberOfLinks', rwin32.DWORD), - ('nFileIndexHigh', rwin32.DWORD), - ('nFileIndexLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME)]) - - WIN32_FIND_DATA = platform.Struct( - 'WIN32_FIND_DATAA', - # Only interesting fields - [('dwFileAttributes', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME)]) - - globals().update(platform.configure(CConfig)) - GET_FILEEX_INFO_LEVELS = rffi.ULONG # an enumeration - - GetFileAttributesEx = rffi.llexternal( - 'GetFileAttributesExA', - [rffi.CCHARP, GET_FILEEX_INFO_LEVELS, - lltype.Ptr(WIN32_FILE_ATTRIBUTE_DATA)], - rwin32.BOOL, - calling_conv='win') - - GetFileInformationByHandle = rffi.llexternal( - 'GetFileInformationByHandle', - [rwin32.HANDLE, lltype.Ptr(BY_HANDLE_FILE_INFORMATION)], - rwin32.BOOL, - calling_conv='win') - - GetFileType = rffi.llexternal( - 'GetFileType', - [rwin32.HANDLE], - rwin32.DWORD, - calling_conv='win') - - FindFirstFile = rffi.llexternal( - 'FindFirstFileA', - [rffi.CCHARP, lltype.Ptr(WIN32_FIND_DATA)], - rwin32.HANDLE, - calling_conv='win') - - FindClose = rffi.llexternal( - 'FindClose', - [rwin32.HANDLE], - rwin32.BOOL, - calling_conv='win') - def attributes_to_mode(attributes): m = 0 - if attributes & FILE_ATTRIBUTE_DIRECTORY: - m |= _S_IFDIR | 0111 # IFEXEC for user,group,other + if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY: + m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other else: - m |= _S_IFREG - if attributes & FILE_ATTRIBUTE_READONLY: + m |= win32traits._S_IFREG + if attributes & 
win32traits.FILE_ATTRIBUTE_READONLY: m |= 0444 else: m |= 0666 return m - def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) - - # Seconds between 1.1.1601 and 1.1.1970 - secs_between_epochs = lltype.r_longlong(11644473600) - - def FILE_TIME_to_time_t_nsec(filetime): - ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) - # FILETIME is in units of 100 nsec - nsec = (ft % 10000000) * 100 - time = (ft / 10000000) - secs_between_epochs - return time, nsec - - def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & ((1 << 32) - 1)) - def attribute_data_to_stat(info): st_mode = attributes_to_mode(info.c_dwFileAttributes) st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) @@ -456,65 +348,94 @@ return make_stat_result(result) def attributes_from_dir(l_path, data): - filedata = lltype.malloc(WIN32_FIND_DATA, flavor='raw') - hFindFile = FindFirstFile(l_path, filedata) - if hFindFile == rwin32.INVALID_HANDLE_VALUE: - return 0 - FindClose(hFindFile) - data.c_dwFileAttributes = filedata.c_dwFileAttributes - rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime) - rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime) - rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime) - data.c_nFileSizeHigh = filedata.c_nFileSizeHigh - data.c_nFileSizeLow = filedata.c_nFileSizeLow - return 1 + filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw') + try: + hFindFile = win32traits.FindFirstFile(l_path, filedata) + if hFindFile == rwin32.INVALID_HANDLE_VALUE: + return 0 + win32traits.FindClose(hFindFile) + data.c_dwFileAttributes = filedata.c_dwFileAttributes + rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime) + rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime) + rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime) + data.c_nFileSizeHigh = filedata.c_nFileSizeHigh + data.c_nFileSizeLow = filedata.c_nFileSizeLow + return 1 + finally: + lltype.free(filedata, flavor='raw') def win32_stat_llimpl(path): - data = lltype.malloc(WIN32_FILE_ATTRIBUTE_DATA, flavor='raw') + data = lltype.malloc(win32traits.WIN32_FILE_ATTRIBUTE_DATA, flavor='raw') try: - l_path = rffi.str2charp(path) - res = GetFileAttributesEx(l_path, GetFileExInfoStandard, data) + l_path = traits.str2charp(path) + res = win32traits.GetFileAttributesEx(l_path, win32traits.GetFileExInfoStandard, data) errcode = rwin32.GetLastError() if res == 0: - if errcode == ERROR_SHARING_VIOLATION: + if errcode == win32traits.ERROR_SHARING_VIOLATION: res = attributes_from_dir(l_path, data) errcode = rwin32.GetLastError() - rffi.free_charp(l_path) + traits.free_charp(l_path) if res == 0: raise WindowsError(errcode, "os_stat failed") return attribute_data_to_stat(data) finally: lltype.free(data, flavor='raw') - win32_lstat_llimpl = win32_stat_llimpl def win32_fstat_llimpl(fd): handle = rwin32._get_osfhandle(fd) - filetype = GetFileType(handle) - if filetype == FILE_TYPE_CHAR: + filetype = win32traits.GetFileType(handle) + if filetype == win32traits.FILE_TYPE_CHAR: # console or LPT device - return make_stat_result((_S_IFCHR, + return make_stat_result((win32traits._S_IFCHR, 0, 0, 0, 0, 0, 0, 0, 0, 0)) - elif filetype == FILE_TYPE_PIPE: + elif filetype == win32traits.FILE_TYPE_PIPE: # socket or named pipe - return make_stat_result((_S_IFIFO, + 
return make_stat_result((win32traits._S_IFIFO, 0, 0, 0, 0, 0, 0, 0, 0, 0)) - elif filetype == FILE_TYPE_UNKNOWN: + elif filetype == win32traits.FILE_TYPE_UNKNOWN: error = rwin32.GetLastError() if error != 0: raise WindowsError(error, "os_fstat failed") # else: unknown but valid file # normal disk file (FILE_TYPE_DISK) - info = lltype.malloc(BY_HANDLE_FILE_INFORMATION, flavor='raw', - zero=True) + info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION, + flavor='raw', zero=True) try: - res = GetFileInformationByHandle(handle, info) + res = win32traits.GetFileInformationByHandle(handle, info) if res == 0: raise WindowsError(rwin32.GetLastError(), "os_fstat failed") return by_handle_info_to_stat(info) finally: lltype.free(info, flavor='raw') + if name == 'fstat': + return win32_fstat_llimpl + else: + return win32_stat_llimpl + + +#__________________________________________________ +# Helper functions for win32 + +def make_longlong(high, low): + return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + +# Seconds between 1.1.1601 and 1.1.1970 +secs_between_epochs = lltype.r_longlong(11644473600) + +def FILE_TIME_to_time_t_nsec(filetime): + ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) + # FILETIME is in units of 100 nsec + nsec = (ft % 10000000) * 100 + time = (ft / 10000000) - secs_between_epochs + return time, nsec + +def time_t_to_FILE_TIME(time, filetime): + ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) + filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) + filetime.c_dwLowDateTime = lltype.r_uint(ft & ((1 << 32) - 1)) + Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_time.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_time.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_time.py Thu Sep 9 01:00:13 2010 @@ -9,6 +9,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.extfunc import BaseLazyRegistering, registering, extdef from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.translator.tool.cbuild import ExternalCompilationInfo if sys.platform == 'win32': @@ -119,7 +120,8 @@ if self.HAVE_FTIME: t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) - result = float(int(t.c_time)) + float(int(t.c_millitm)) * 0.001 + result = (float(intmask(t.c_time)) + + float(intmask(t.c_millitm)) * 0.001) lltype.free(t, flavor='raw') return result return float(c_time(void)) Modified: pypy/branch/fast-forward/pypy/rpython/module/test/test_ll_os_stat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/test/test_ll_os_stat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/test/test_ll_os_stat.py Thu Sep 9 01:00:13 2010 @@ -1,4 +1,4 @@ -from pypy.rpython.module import ll_os_stat +from pypy.rpython.module import ll_os_stat, ll_os import sys, os import py @@ -8,14 +8,18 @@ py.test.skip("win32 specific tests") def test_stat(self): - stat = ll_os_stat.win32_stat_llimpl + stat = ll_os_stat.make_win32_stat_impl('stat', ll_os.StringTraits()) + wstat = ll_os_stat.make_win32_stat_impl('stat', ll_os.UnicodeTraits()) def check(f): - assert stat(f).st_mtime == os.stat(f).st_mtime + expected = os.stat(f).st_mtime + assert stat(f).st_mtime == expected + assert wstat(unicode(f)).st_mtime == expected check('c:/') check('c:/temp') check('c:/pagefile.sys') def test_fstat(self): - stat = ll_os_stat.win32_fstat_llimpl(0) # 
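
The helpers moved to module level here convert between Win32 FILETIME (100-nanosecond ticks since 1601-01-01) and POSIX time_t (seconds since 1970-01-01, which is 11644473600 seconds later). The same arithmetic in plain Python, including a round trip through the Unix epoch:

    SECS_BETWEEN_EPOCHS = 11644473600      # seconds from 1601-01-01 to 1970-01-01

    def filetime_to_time_t_nsec(high, low):
        ft = (high << 32) + low             # FILETIME counts 100ns ticks since 1601
        nsec = (ft % 10000000) * 100        # sub-second remainder, in nanoseconds
        time = (ft // 10000000) - SECS_BETWEEN_EPOCHS
        return time, nsec

    def time_t_to_filetime(time):
        ft = (time + SECS_BETWEEN_EPOCHS) * 10000000
        return ft >> 32, ft & 0xFFFFFFFF    # (dwHighDateTime, dwLowDateTime)

    high, low = time_t_to_filetime(0)       # the Unix epoch
    assert filetime_to_time_t_nsec(high, low) == (0, 0)
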
stdout + fstat = ll_os_stat.make_win32_stat_impl('fstat', ll_os.StringTraits()) + stat = fstat(0) # stdout assert stat.st_mode != 0 Modified: pypy/branch/fast-forward/pypy/rpython/ootypesystem/ootype.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/ootypesystem/ootype.py (original) +++ pypy/branch/fast-forward/pypy/rpython/ootypesystem/ootype.py Thu Sep 9 01:00:13 2010 @@ -267,6 +267,14 @@ return self._fields_with_default[:] return self._superclass._get_fields_with_default() + self._fields_with_default + def _immutable_field(self, field): + if 'immutable_fields' in self._hints: + try: + s = self._hints['immutable_fields'].fields[field] + return s or True + except KeyError: + pass + return self._hints.get('immutable', False) class SpecializableType(OOType): Modified: pypy/branch/fast-forward/pypy/rpython/ootypesystem/rclass.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/ootypesystem/rclass.py (original) +++ pypy/branch/fast-forward/pypy/rpython/ootypesystem/rclass.py Thu Sep 9 01:00:13 2010 @@ -194,6 +194,7 @@ self.lowleveltype._hints.update(hints) if self.classdef is None: + self.fields = {} self.allfields = {} self.allmethods = {} self.allclassattributes = {} @@ -210,6 +211,7 @@ allclassattributes = {} fields = {} + nonmangledfields = [] fielddefaults = {} if llfields: @@ -224,6 +226,7 @@ allfields[mangled] = repr oot = repr.lowleveltype fields[mangled] = oot + nonmangledfields.append(name) try: value = self.classdef.classdesc.read_attribute(name) fielddefaults[mangled] = repr.convert_desc_or_const(value) @@ -294,6 +297,7 @@ if not attrdef.s_value.is_constant(): classattributes[mangled] = attrdef.s_value, value + self.fields = nonmangledfields self.allfields = allfields self.allmethods = allmethods self.allclassattributes = allclassattributes Modified: pypy/branch/fast-forward/pypy/rpython/rbuiltin.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rbuiltin.py (original) +++ pypy/branch/fast-forward/pypy/rpython/rbuiltin.py Thu Sep 9 01:00:13 2010 @@ -542,16 +542,25 @@ return hop.genop('raw_malloc_usage', [v_size], resulttype=lltype.Signed) def rtype_raw_free(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_free(x) where x is the constant NULL") v_addr, = hop.inputargs(llmemory.Address) hop.exception_cannot_occur() return hop.genop('raw_free', [v_addr]) def rtype_raw_memcopy(hop): + for s_addr in hop.args_s[:2]: + if s_addr.is_null_address(): + raise TyperError("raw_memcopy() with a constant NULL") v_list = hop.inputargs(llmemory.Address, llmemory.Address, lltype.Signed) hop.exception_cannot_occur() return hop.genop('raw_memcopy', v_list) def rtype_raw_memclear(hop): + s_addr = hop.args_s[0] + if s_addr.is_null_address(): + raise TyperError("raw_memclear(x, n) where x is the constant NULL") v_list = hop.inputargs(llmemory.Address, lltype.Signed) return hop.genop('raw_memclear', v_list) Modified: pypy/branch/fast-forward/pypy/rpython/rclass.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rclass.py (original) +++ pypy/branch/fast-forward/pypy/rpython/rclass.py Thu Sep 9 01:00:13 2010 @@ -9,6 +9,7 @@ class FieldListAccessor(object): def initialize(self, TYPE, fields): + assert type(fields) is dict self.TYPE = TYPE self.fields = fields @@ -18,6 +19,10 @@ def 
_freeze_(self): return True +class ImmutableConflictError(Exception): + """Raised when the _immutable_ or _immutable_fields_ hints are + not consistent across a class hierarchy.""" + def getclassrepr(rtyper, classdef): try: @@ -153,12 +158,16 @@ pass def _check_for_immutable_hints(self, hints): - if '_immutable_' in self.classdef.classdesc.classdict: + if self.classdef.classdesc.lookup('_immutable_') is not None: hints = hints.copy() hints['immutable'] = True - if '_immutable_fields_' in self.classdef.classdesc.classdict: + self.immutable_field_list = [] # unless overwritten below + if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() - self.immutable_field_list = self.classdef.classdesc.classdict['_immutable_fields_'].value + immutable_fields = self.classdef.classdesc.classdict.get( + '_immutable_fields_') + if immutable_fields is not None: + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -178,10 +187,20 @@ return 'InstanceR %s' % (clsname,) def _setup_repr_final(self): + self._setup_immutable_field_list() + self._check_for_immutable_conflicts() + + def _setup_immutable_field_list(self): hints = self.object_type._hints if "immutable_fields" in hints: accessor = hints["immutable_fields"] - self._parse_field_list(self.immutable_field_list, accessor) + if not hasattr(accessor, 'fields'): + immutable_fields = [] + rbase = self + while rbase.classdef is not None: + immutable_fields += rbase.immutable_field_list + rbase = rbase.rbase + self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} @@ -191,11 +210,44 @@ suffix = '[*]' else: suffix = '' - mangled_name, r = self._get_field(name) + try: + mangled_name, r = self._get_field(name) + except KeyError: + continue with_suffix[mangled_name] = suffix accessor.initialize(self.object_type, with_suffix) return with_suffix + def _check_for_immutable_conflicts(self): + # check for conflicts, i.e. 
a field that is defined normally as + # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void + is_self_immutable = "immutable" in self.object_type._hints + base = self + while base.classdef is not None: + base = base.rbase + for fieldname in base.fields: + try: + mangled, r = base._get_field(fieldname) + except KeyError: + continue + if r.lowleveltype == Void: + continue + base._setup_immutable_field_list() + if base.object_type._immutable_field(mangled): + continue + # 'fieldname' is a mutable, non-Void field in the parent + if is_self_immutable: + raise ImmutableConflictError( + "class %r has _immutable_=True, but parent class %r " + "defines (at least) the mutable field %r" % ( + self, base, fieldname)) + if fieldname in self.immutable_field_list: + raise ImmutableConflictError( + "field %r is defined mutable in class %r, but " + "listed in _immutable_fields_ in subclass %r" % ( + fieldname, base, self)) + def new_instance(self, llops, classcallhop=None): raise NotImplementedError Modified: pypy/branch/fast-forward/pypy/rpython/rstr.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rstr.py (original) +++ pypy/branch/fast-forward/pypy/rpython/rstr.py Thu Sep 9 01:00:13 2010 @@ -291,8 +291,8 @@ if not hop.args_s[1].is_constant(): raise TyperError("encoding must be constant") encoding = hop.args_s[1].const - if encoding == "ascii": - expect = self.lowleveltype # can be a UniChar + if encoding == "ascii" and self.lowleveltype == UniChar: + expect = UniChar # only for unichar.encode('ascii') else: expect = self.repr # must be a regular unicode string v_self = hop.inputarg(expect, 0) Modified: pypy/branch/fast-forward/pypy/rpython/test/test_extfunc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/test/test_extfunc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/test/test_extfunc.py Thu Sep 9 01:00:13 2010 @@ -6,150 +6,164 @@ from pypy.annotation.policy import AnnotatorPolicy from pypy.rpython.test.test_llinterp import interpret -def b(x): - return eval("x+40") +class TestExtFuncEntry: -class BTestFuncEntry(ExtFuncEntry): - _about_ = b - name = 'b' - signature_args = [annmodel.SomeInteger()] - signature_result = annmodel.SomeInteger() - -def test_annotation_b(): - def f(): - return b(1) - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) - -def test_rtyping_b(): - def f(): - return b(2) - - res = interpret(f, []) - assert res == 42 - -def c(y, x): - yyy - -class CTestFuncEntry(ExtFuncEntry): - _about_ = c - name = 'ccc' - signature_args = [annmodel.SomeInteger()] * 2 - signature_result = annmodel.SomeInteger() - - def lltypeimpl(y, x): - return y + x - lltypeimpl = staticmethod(lltypeimpl) - -def test_interp_c(): - def f(): - return c(3, 4) - - res = interpret(f, []) - assert res == 7 - -def d(y): - return eval("y()") - -class DTestFuncEntry(ExtFuncEntry): - _about_ = d - name = 'd' - signature_args = [annmodel.SomeGenericCallable(args=[], result= - annmodel.SomeFloat())] - signature_result = annmodel.SomeFloat() - -def test_callback(): - def callback(): - return 2.5 - - def f(): - return d(callback) - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, 
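
The _check_for_immutable_conflicts method added above walks up the chain of parent reprs and rejects a class that declares itself, or one of its inherited fields, immutable while a parent class defines that field as mutable. A rough plain-Python model of the same rule, using ordinary classes and a hypothetical _fields_ list as a stand-in for the parent repr's mutable fields:

    class ImmutableConflictError(Exception):
        pass

    def check_for_immutable_conflicts(cls):
        is_self_immutable = getattr(cls, '_immutable_', False)
        own_immutable = set(getattr(cls, '_immutable_fields_', []))
        for base in cls.__mro__[1:]:
            for fieldname in vars(base).get('_fields_', []):   # parent's mutable fields
                if is_self_immutable:
                    raise ImmutableConflictError(
                        "%r has _immutable_=True but parent %r defines "
                        "mutable field %r" % (cls, base, fieldname))
                if fieldname in own_immutable:
                    raise ImmutableConflictError(
                        "field %r is mutable in %r but listed in "
                        "_immutable_fields_ of %r" % (fieldname, base, cls))

    class Parent(object):
        _fields_ = ['x']                     # an ordinary mutable field

    class Child(Parent):
        _immutable_fields_ = ['x']           # conflicts with the parent

    try:
        check_for_immutable_conflicts(Child)
    except ImmutableConflictError:
        pass                                 # the conflict is detected
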
annmodel.SomeFloat) - assert a.translator._graphof(callback) - -def dd(): - pass - -register_external(dd, [int], int) - -def test_register_external_signature(): - def f(): - return dd(3) - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) - - -def function_with_tuple_arg(): - """ - Dummy function which is declared via register_external to take a tuple as - an argument so that register_external's behavior for tuple-taking functions - can be verified. - """ -register_external(function_with_tuple_arg, [(int,)], int) - -def test_register_external_tuple_args(): - """ - Verify the annotation of a registered external function which takes a tuple - argument. - """ - def f(): - return function_with_tuple_arg((1,)) - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - - # Not a very good assertion, but at least it means _something_ happened. - assert isinstance(s, annmodel.SomeInteger) - -def function_with_list(): - pass -register_external(function_with_list, [[int]], int) - -def function_returning_list(): - pass -register_external(function_returning_list, [], [int]) - -def test_register_external_return_goes_back(): - """ - Check whether it works to pass the same list from one external - fun to another - [bookkeeper and list joining issues] - """ - def f(): - return function_with_list(function_returning_list()) - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) - -def function_withspecialcase(arg): - return repr(arg) -register_external(function_withspecialcase, args=None, result=str) - -def test_register_external_specialcase(): - def f(): - x = function_withspecialcase - return x(33) + x("aaa") + x([]) + "\n" - - policy = AnnotatorPolicy() - policy.allow_someobjects = False - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeString) + def test_basic(self): + """ + A ExtFuncEntry provides an annotation for a function, no need to flow + its graph. + """ + def b(x): + "NOT_RPYTHON" + return eval("x+40") + + class BTestFuncEntry(ExtFuncEntry): + _about_ = b + name = 'b' + signature_args = [annmodel.SomeInteger()] + signature_result = annmodel.SomeInteger() + + def f(): + return b(2) + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeInteger) + + res = interpret(f, []) + assert res == 42 + + def test_lltypeimpl(self): + """ + interpret() calls lltypeimpl instead of of the function/ + """ + def c(y, x): + yyy + + class CTestFuncEntry(ExtFuncEntry): + _about_ = c + name = 'ccc' + signature_args = [annmodel.SomeInteger()] * 2 + signature_result = annmodel.SomeInteger() + + def lltypeimpl(y, x): + return y + x + lltypeimpl = staticmethod(lltypeimpl) + + def f(): + return c(3, 4) + + res = interpret(f, []) + assert res == 7 + + def test_callback(self): + """ + Verify annotation when a callback function is in the arguments list. 
+ """ + def d(y): + return eval("y()") + + class DTestFuncEntry(ExtFuncEntry): + _about_ = d + name = 'd' + signature_args = [annmodel.SomeGenericCallable(args=[], result= + annmodel.SomeFloat())] + signature_result = annmodel.SomeFloat() + + def callback(): + return 2.5 + + def f(): + return d(callback) + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeFloat) + assert a.translator._graphof(callback) + + def test_register_external_signature(self): + """ + Test the standard interface for external functions. + """ + def dd(): + pass + register_external(dd, [int], int) + + def f(): + return dd(3) + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeInteger) + + def test_register_external_tuple_args(self): + """ + Verify the annotation of a registered external function which takes a + tuple argument. + """ + + def function_with_tuple_arg(): + """ + Dummy function which is declared via register_external to take a + tuple as an argument so that register_external's behavior for + tuple-taking functions can be verified. + """ + register_external(function_with_tuple_arg, [(int,)], int) + + def f(): + return function_with_tuple_arg((1,)) + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + + # Not a very good assertion, but at least it means _something_ happened. + assert isinstance(s, annmodel.SomeInteger) + + def test_register_external_return_goes_back(self): + """ + Check whether it works to pass the same list from one external + fun to another + [bookkeeper and list joining issues] + """ + def function_with_list(): + pass + register_external(function_with_list, [[int]], int) + + def function_returning_list(): + pass + register_external(function_returning_list, [], [int]) + + def f(): + return function_with_list(function_returning_list()) + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeInteger) + + def test_register_external_specialcase(self): + """ + When args=None, the external function accepts any arguments unmodified. 
+ """ + def function_withspecialcase(arg): + return repr(arg) + register_external(function_withspecialcase, args=None, result=str) + + def f(): + x = function_withspecialcase + return x(33) + x("aaa") + x([]) + "\n" + + policy = AnnotatorPolicy() + policy.allow_someobjects = False + a = RPythonAnnotator(policy=policy) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeString) Modified: pypy/branch/fast-forward/pypy/rpython/test/test_rclass.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/test/test_rclass.py (original) +++ pypy/branch/fast-forward/pypy/rpython/test/test_rclass.py Thu Sep 9 01:00:13 2010 @@ -738,27 +738,150 @@ assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype - def test_immutable_inheritance(self): - class I(object): - def __init__(self, v): - self.v = v - - class J(I): + def test_immutable_fields_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ["x"] + def __init__(self, x): + self.x = x + class B(A): + def __init__(self, x, y): + A.__init__(self, x) + self.y = y + + def f(): + return B(3, 5) + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_x" : ""} or \ + accessor.fields == {"ox" : ""} # for ootype + + def test_immutable_fields_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ["x"] + def __init__(self, x): + self.x = x + class B(A): + _immutable_fields_ = ["y"] + def __init__(self, x, y): + A.__init__(self, x) + self.y = y + + def f(): + return B(3, 5) + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ + accessor.fields == {"ox" : "", "oy" : ""} # for ootype + + def test_immutable_fields_only_in_subclass(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + def __init__(self, x): + self.x = x + class B(A): + _immutable_fields_ = ["y"] + def __init__(self, x, y): + A.__init__(self, x) + self.y = y + + def f(): + return B(3, 5) + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_y" : ""} or \ + accessor.fields == {"oy" : ""} # for ootype + + def test_immutable_forbidden_inheritance_1(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): + _immutable_fields_ = ['v'] + def f(): + A().v = 123 + B() # crash: class B says 'v' is immutable, + # but it is defined on parent class A + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_forbidden_inheritance_2(self): + from pypy.rpython.rclass import ImmutableConflictError + class A(object): + pass + class B(A): _immutable_ = True - def __init__(self, v, w): - self.w = w - I.__init__(self, v) - - j = J(3, 4) - def f(): - j.v = j.v * 1 # make the annotator think it is mutated - j.w = j.w * 1 # make the annotator think it is mutated - return j.v + j.w - - t, typer, graph = self.gengraph(f, [], backendopt=True) - f_summary = summary(graph) - assert f_summary == {"setfield": 2} or \ - f_summary == {"oosetfield": 2} # for ootype + def f(): + A().v = 123 + B() # 
crash: class B has _immutable_ = True + # but class A defines 'v' to be mutable + py.test.raises(ImmutableConflictError, self.gengraph, f, []) + + def test_immutable_ok_inheritance_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['v'] + class B(A): + _immutable_ = True + def f(): + A().v = 123 + B().w = 456 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + try: + A_TYPE = B_TYPE.super + except AttributeError: + A_TYPE = B_TYPE._superclass # for ootype + accessor = A_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype + + def test_immutable_subclass_1(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_ = True + class B(A): + pass + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] # inherited from A + + def test_immutable_subclass_2(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def f(): + B().v = 123 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] + + def test_immutable_subclass_void(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + pass + class B(A): + _immutable_ = True + def myfunc(): + pass + def f(): + A().f = myfunc # it's ok to add Void attributes to A + B().v = 123 # even though only B is declared _immutable_ + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + assert B_TYPE._hints["immutable"] class TestLLtype(BaseTestRclass, LLRtypeMixin): Modified: pypy/branch/fast-forward/pypy/rpython/test/test_rint.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/test/test_rint.py (original) +++ pypy/branch/fast-forward/pypy/rpython/test/test_rint.py Thu Sep 9 01:00:13 2010 @@ -117,10 +117,10 @@ assert self.ll_to_string(res) == '413974738222117' def test_unsigned(self): - bigvalue = sys.maxint + 17 + bigvalue = r_uint(sys.maxint + 17) def dummy(i): i = r_uint(i) - j = r_uint(bigvalue) + j = bigvalue return i < j res = self.interpret(dummy,[0]) Modified: pypy/branch/fast-forward/pypy/rpython/tool/rfficache.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/tool/rfficache.py (original) +++ pypy/branch/fast-forward/pypy/rpython/tool/rfficache.py Thu Sep 9 01:00:13 2010 @@ -29,7 +29,7 @@ } ''' % (include_string, add_source, str(question))) c_file = udir.join("gcctest.c") - c_file.write(c_source) + c_file.write(str(c_source) + '\n') eci = ExternalCompilationInfo() return build_executable_cache([c_file], eci) Modified: pypy/branch/fast-forward/pypy/tool/release/package.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/release/package.py (original) +++ pypy/branch/fast-forward/pypy/tool/release/package.py Thu Sep 9 01:00:13 2010 @@ -11,11 +11,12 @@ import py import os import fnmatch -import tarfile from pypy.tool.udir import udir if sys.version_info < (2,6): py.test.skip("requires 2.6 so far") +USE_TARFILE_MODULE = sys.platform == 'win32' + def 
ignore_patterns(*patterns): """Function that can be used as copytree() ignore parameter. @@ -69,9 +70,17 @@ old_dir = os.getcwd() try: os.chdir(str(builddir)) - os.system("strip " + str(archive_pypy_c)) - os.system('tar cvjf ' + str(builddir.join(name + '.tar.bz2')) + - " " + name) + os.system("strip " + str(archive_pypy_c)) # ignore errors + if USE_TARFILE_MODULE: + import tarfile + tf = tarfile.open(str(builddir.join(name + '.tar.bz2')), 'w:bz2') + tf.add(name) + tf.close() + else: + e = os.system('tar cvjf ' + str(builddir.join(name + '.tar.bz2')) + + " " + name) + if e: + raise OSError('"tar" returned exit status %r' % e) finally: os.chdir(old_dir) if copy_to_dir is not None: Modified: pypy/branch/fast-forward/pypy/tool/release/test/test_package.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/release/test/test_package.py (original) +++ pypy/branch/fast-forward/pypy/tool/release/test/test_package.py Thu Sep 9 01:00:13 2010 @@ -5,7 +5,7 @@ from pypy.module.sys.version import CPYTHON_VERSION import tarfile, os -def test_dir_structure(): +def test_dir_structure(test='test'): # make sure we have sort of pypy-c pypy_c = py.path.local(pypydir).join('translator', 'goal', 'pypy-c') if not pypy_c.check(): @@ -14,8 +14,8 @@ else: fake_pypy_c = False try: - builddir = package(py.path.local(pypydir).dirpath(), 'test') - prefix = builddir.join('test') + builddir = package(py.path.local(pypydir).dirpath(), test) + prefix = builddir.join(test) cpyver = '%d.%d.%d' % CPYTHON_VERSION[:3] assert prefix.join('lib-python', cpyver, 'test').check() assert prefix.join('bin', 'pypy-c').check() @@ -24,18 +24,27 @@ assert not prefix.join('lib_pypy', 'ctypes_configure').check() assert prefix.join('LICENSE').check() assert prefix.join('README').check() - th = tarfile.open(str(builddir.join('test.tar.bz2'))) - assert th.getmember('test/lib_pypy/syslog.py') + th = tarfile.open(str(builddir.join('%s.tar.bz2' % test))) + assert th.getmember('%s/lib_pypy/syslog.py' % test) # the headers file could be not there, because they are copied into # trunk/include only during translation includedir = py.path.local(pypydir).dirpath().join('include') def check_include(name): if includedir.join(name).check(file=True): - assert th.getmember('test/include/%s' % name) + assert th.getmember('%s/include/%s' % (test, name)) check_include('Python.h') check_include('modsupport.inl') check_include('pypy_decl.h') finally: if fake_pypy_c: pypy_c.remove() + +def test_with_tarfile_module(): + from pypy.tool.release import package + prev = package.USE_TARFILE_MODULE + try: + package.USE_TARFILE_MODULE = True + test_dir_structure(test='testtarfile') + finally: + package.USE_TARFILE_MODULE = prev Modified: pypy/branch/fast-forward/pypy/tool/runsubprocess.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/runsubprocess.py (original) +++ pypy/branch/fast-forward/pypy/tool/runsubprocess.py Thu Sep 9 01:00:13 2010 @@ -70,5 +70,5 @@ assert results.startswith('(') results = eval(results) if results[0] is None: - raise OSError(results[1]) + raise OSError('%s: %s' % (args[0], results[1])) return results Modified: pypy/branch/fast-forward/pypy/translator/backendopt/test/test_constfold.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/backendopt/test/test_constfold.py (original) +++ 
pypy/branch/fast-forward/pypy/translator/backendopt/test/test_constfold.py Thu Sep 9 01:00:13 2010 @@ -49,7 +49,7 @@ accessor = rclass.FieldListAccessor() S2 = lltype.GcStruct('S2', ('x', lltype.Signed), hints={'immutable_fields': accessor}) - accessor.initialize(S2, ['x']) + accessor.initialize(S2, {'x': ''}) test_simple(S2) Modified: pypy/branch/fast-forward/pypy/translator/c/database.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/database.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/database.py Thu Sep 9 01:00:13 2010 @@ -213,7 +213,7 @@ forcename = self.idelayedfunctionnames[obj][0] node = self.getcontainernode(container, forcename=forcename) - assert node.ptrname == forcename + assert node.getptrname() == forcename return forcename # /hack hack hack @@ -222,7 +222,7 @@ return '((%s) %d)' % (cdecl(self.gettype(T), ''), obj._obj) node = self.getcontainernode(container) - return node.ptrname + return node.getptrname() else: return '((%s) NULL)' % (cdecl(self.gettype(T), ''), ) else: Modified: pypy/branch/fast-forward/pypy/translator/c/gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gc.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gc.py Thu Sep 9 01:00:13 2010 @@ -172,7 +172,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = 'void (@)(void *)' self.name = defnode.gcinfo.static_deallocator - self.ptrname = '((void (*)(void *)) %s)' % (self.name,) + + def getptrname(self): + return '((void (*)(void *)) %s)' % (self.name,) def enum_dependencies(self): return [] @@ -266,7 +268,9 @@ defnode = db.gettypedefnode(obj.about) self.implementationtypename = self.typename self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename) - self.ptrname = '(&%s)' % (self.name,) + + def getptrname(self): + return '(&%s)' % (self.name,) def enum_dependencies(self): return [] Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/instruction.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gcc/instruction.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gcc/instruction.py Thu Sep 9 01:00:13 2010 @@ -5,6 +5,14 @@ LOC_MASK = 0x03 LOC_NOWHERE = LOC_REG | 0 +# x86-32 registers sometimes used to pass arguments when gcc optimizes +# a function's calling convention +ARGUMENT_REGISTERS_32 = ('%eax', '%edx', '%ecx') + +# x86-64 registers used to pass arguments +ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') + + def frameloc_esp(offset): assert offset >= 0 assert offset % 4 == 0 @@ -19,7 +27,8 @@ class SomeNewValue(object): - pass + def __repr__(self): + return 'somenewvalue' somenewvalue = SomeNewValue() class LocalVar(object): @@ -42,7 +51,7 @@ else: return 1 - def getlocation(self, framesize, uses_frame_pointer): + def getlocation(self, framesize, uses_frame_pointer, wordsize): if (self.hint == 'esp' or not uses_frame_pointer or self.ofs_from_frame_end % 2 != 0): # try to use esp-relative addressing @@ -52,7 +61,7 @@ # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer - ofs_from_ebp = self.ofs_from_frame_end + 4 + ofs_from_ebp = self.ofs_from_frame_end + wordsize return frameloc_ebp(ofs_from_ebp) @@ -81,22 +90,28 @@ self.previous_insns = [] # all insns that jump (or fallthrough) here class InsnFunctionStart(Insn): + _args_ 
= ['arguments'] framesize = 0 previous_insns = () - def __init__(self, registers): + def __init__(self, registers, wordsize): self.arguments = {} for reg in registers: self.arguments[reg] = somenewvalue + self.wordsize = wordsize def source_of(self, localvar, tag): if localvar not in self.arguments: - if localvar in ('%eax', '%edx', '%ecx'): + if self.wordsize == 4 and localvar in ARGUMENT_REGISTERS_32: # xxx this might show a bug in trackgcroot.py failing to # figure out which instruction stored a value in these # registers. However, this case also occurs when the # the function's calling convention was optimized by gcc: # the 3 registers above are then used to pass arguments pass + elif self.wordsize == 8 and localvar in ARGUMENT_REGISTERS_64: + # this is normal: these registers are always used to + # pass arguments + pass else: assert (isinstance(localvar, LocalVar) and localvar.ofs_from_frame_end > 0), ( @@ -218,15 +233,16 @@ return {self.loc: None} class InsnPrologue(Insn): + def __init__(self, wordsize): + self.wordsize = wordsize def __setattr__(self, attr, value): if attr == 'framesize': - assert value == 4, ("unrecognized function prologue - " - "only supports push %ebp; movl %esp, %ebp") + assert value == self.wordsize, ( + "unrecognized function prologue - " + "only supports push %ebp; movl %esp, %ebp") Insn.__setattr__(self, attr, value) class InsnEpilogue(Insn): def __init__(self, framesize=None): if framesize is not None: self.framesize = framesize - - Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/test/conftest.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gcc/test/conftest.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gcc/test/conftest.py Thu Sep 9 01:00:13 2010 @@ -1,8 +1,6 @@ import py from pypy.jit.backend import detect_cpu - cpu = detect_cpu.autodetect() def pytest_runtest_setup(item): - if cpu != 'x86': + if cpu not in ('x86', 'x86_64'): py.test.skip("x86 directory skipped: cpu is %r" % (cpu,)) - Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/test/test_trackgcroot.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gcc/test/test_trackgcroot.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gcc/test/test_trackgcroot.py Thu Sep 9 01:00:13 2010 @@ -1,51 +1,52 @@ import py import sys, re -from pypy.translator.c.gcc.trackgcroot import format_location -from pypy.translator.c.gcc.trackgcroot import format_callshape from pypy.translator.c.gcc.trackgcroot import LOC_NOWHERE, LOC_REG from pypy.translator.c.gcc.trackgcroot import LOC_EBP_PLUS, LOC_EBP_MINUS from pypy.translator.c.gcc.trackgcroot import LOC_ESP_PLUS from pypy.translator.c.gcc.trackgcroot import ElfAssemblerParser from pypy.translator.c.gcc.trackgcroot import DarwinAssemblerParser -from pypy.translator.c.gcc.trackgcroot import compress_callshape -from pypy.translator.c.gcc.trackgcroot import decompress_callshape from pypy.translator.c.gcc.trackgcroot import PARSERS +from pypy.translator.c.gcc.trackgcroot import ElfFunctionGcRootTracker32 from StringIO import StringIO +import py.test this_dir = py.path.local(__file__).dirpath() def test_format_location(): - assert format_location(LOC_NOWHERE) == '?' 
- assert format_location(LOC_REG | (1<<2)) == '%ebx' - assert format_location(LOC_REG | (2<<2)) == '%esi' - assert format_location(LOC_REG | (3<<2)) == '%edi' - assert format_location(LOC_REG | (4<<2)) == '%ebp' - assert format_location(LOC_EBP_PLUS + 0) == '(%ebp)' - assert format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' - assert format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' - assert format_location(LOC_ESP_PLUS + 0) == '(%esp)' - assert format_location(LOC_ESP_PLUS + 4) == '4(%esp)' + cls = ElfFunctionGcRootTracker32 + assert cls.format_location(LOC_NOWHERE) == '?' + assert cls.format_location(LOC_REG | (1<<2)) == '%ebx' + assert cls.format_location(LOC_REG | (2<<2)) == '%esi' + assert cls.format_location(LOC_REG | (3<<2)) == '%edi' + assert cls.format_location(LOC_REG | (4<<2)) == '%ebp' + assert cls.format_location(LOC_EBP_PLUS + 0) == '(%ebp)' + assert cls.format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' + assert cls.format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' + assert cls.format_location(LOC_ESP_PLUS + 0) == '(%esp)' + assert cls.format_location(LOC_ESP_PLUS + 4) == '4(%esp)' def test_format_callshape(): + cls = ElfFunctionGcRootTracker32 expected = ('{4(%ebp) ' # position of the return address '| 8(%ebp), 12(%ebp), 16(%ebp), 20(%ebp) ' # 4 saved regs '| 24(%ebp), 28(%ebp)}') # GC roots - assert format_callshape((LOC_EBP_PLUS+4, - LOC_EBP_PLUS+8, - LOC_EBP_PLUS+12, - LOC_EBP_PLUS+16, - LOC_EBP_PLUS+20, - LOC_EBP_PLUS+24, - LOC_EBP_PLUS+28)) == expected + assert cls.format_callshape((LOC_EBP_PLUS+4, + LOC_EBP_PLUS+8, + LOC_EBP_PLUS+12, + LOC_EBP_PLUS+16, + LOC_EBP_PLUS+20, + LOC_EBP_PLUS+24, + LOC_EBP_PLUS+28)) == expected def test_compress_callshape(): + cls = ElfFunctionGcRootTracker32 shape = (1, 127, 0x1234, 0x5678, 0x234567, 0x765432, 0x61626364, 0x41424344) - bytes = list(compress_callshape(shape)) + bytes = list(cls.compress_callshape(shape)) print bytes assert len(bytes) == 1+1+2+3+4+4+5+5+1 - assert decompress_callshape(bytes) == list(shape) + assert cls.decompress_callshape(bytes) == list(shape) def test_find_functions_elf(): source = """\ @@ -108,7 +109,7 @@ def test_computegcmaptable(): tests = [] - for format in ('elf', 'darwin', 'msvc'): + for format in ('elf', 'darwin', 'msvc', 'elf64'): for path in this_dir.join(format).listdir("track*.s"): n = path.purebasename[5:] try: @@ -138,7 +139,7 @@ tabledict = {} seen = {} for entry in table: - print '%s: %s' % (entry[0], format_callshape(entry[1])) + print '%s: %s' % (entry[0], tracker.format_callshape(entry[1])) tabledict[entry[0]] = entry[1] # find the ";; expected" lines prevline = "" @@ -151,7 +152,7 @@ label = prevmatch.group(1) assert label in tabledict got = tabledict[label] - assert format_callshape(got) == expected + assert tracker.format_callshape(got) == expected seen[label] = True if format == 'msvc': expectedlines.insert(i-2, 'PUBLIC\t%s\n' % (label,)) Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py Thu Sep 9 01:00:13 2010 @@ -72,7 +72,7 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(4) + retaddr = frameloc_ebp(self.WORD) else: retaddr = frameloc_esp(insn.framesize) shape = [retaddr] @@ -84,7 +84,8 @@ for localvar, tag in insn.gcroots.items(): if isinstance(localvar, LocalVar): 
loc = localvar.getlocation(insn.framesize, - self.uses_frame_pointer) + self.uses_frame_pointer, + self.WORD) elif localvar in self.REG2LOC: loc = self.REG2LOC[localvar] else: @@ -148,7 +149,7 @@ lst.append(previnsn) def parse_instructions(self): - self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS)] + self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS, self.WORD)] ignore_insns = False for lineno, line in enumerate(self.lines): if lineno < self.skip: @@ -263,7 +264,7 @@ ofs_from_ebp = int(match.group(1) or '0') if self.format == 'msvc': ofs_from_ebp += int(match.group(2) or '0') - localvar = ofs_from_ebp - 4 + localvar = ofs_from_ebp - self.WORD assert localvar != 0 # that's the return address return LocalVar(localvar, hint='ebp') return localvar @@ -357,6 +358,56 @@ self.lines.insert(call.lineno+1, '\t.globl\t%s\n' % (label,)) call.global_label = label + @classmethod + def compress_callshape(cls, shape): + # For a single shape, this turns the list of integers into a list of + # bytes and reverses the order of the entries. The length is + # encoded by inserting a 0 marker after the gc roots coming from + # shape[N:] and before the N values coming from shape[N-1] to + # shape[0] (for N == 5 on 32-bit or 7 on 64-bit platforms). + # In practice it seems that shapes contain many integers + # whose value is up to a few thousands, which the algorithm below + # compresses down to 2 bytes. Very small values compress down to a + # single byte. + + # Callee-save regs plus ret addr + min_size = len(cls.CALLEE_SAVE_REGISTERS) + 1 + + assert len(shape) >= min_size + shape = list(shape) + assert 0 not in shape[min_size:] + shape.insert(min_size, 0) + result = [] + for loc in shape: + assert loc >= 0 + flag = 0 + while loc >= 0x80: + result.append(int(loc & 0x7F) | flag) + flag = 0x80 + loc >>= 7 + result.append(int(loc) | flag) + result.reverse() + return result + + @classmethod + def decompress_callshape(cls, bytes): + # For tests. This logic is copied in asmgcroot.py. 
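        # Worked example (illustration only, not part of the committed
        # patch): following the encoding described in compress_callshape
        # above, the location 0x1234 is stored as the two bytes 0xA4, 0x34.
        # Decoding accumulates 0xA4 (>= 0x80, so a continuation byte):
        # (0xA4 - 0x80) << 7 == 4608, then adds the final byte 0x34 to give
        # 0x1234.  Values below 0x80, such as the 0 length marker, occupy a
        # single byte.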
+ result = [] + n = 0 + while n < len(bytes): + value = 0 + while True: + b = bytes[n] + n += 1 + value += b + if b < 0x80: + break + value = (value - 0x80) << 7 + result.append(value) + result.reverse() + assert result[5] == 0 + del result[5] + return result # ____________________________________________________________ CANNOT_COLLECT = { # some of the most used functions that cannot collect @@ -375,7 +426,7 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', - 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'prefetch', + 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', @@ -385,10 +436,9 @@ 'inc', 'dec', 'not', 'neg', 'or', 'and', 'sbb', 'adc', 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', + 'punpck', 'pshufd', # zero-extending moves should not produce GC pointers 'movz', - # quadword operations - 'movq', ]) visit_movb = visit_nop @@ -400,7 +450,7 @@ visit_xorb = visit_nop visit_xorw = visit_nop - def visit_addl(self, line, sign=+1): + def _visit_add(self, line, sign=+1): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -415,8 +465,8 @@ else: return [] - def visit_subl(self, line): - return self.visit_addl(line, sign=-1) + def _visit_sub(self, line): + return self._visit_add(line, sign=-1) def unary_insn(self, line): match = self.r_unaryinsn.match(line) @@ -439,8 +489,6 @@ else: return [] - visit_xorl = binary_insn # used in "xor reg, reg" to create a NULL GC ptr - visit_orl = binary_insn # The various cmov* operations for name in ''' e ne g ge l le a ae b be p np s ns o no @@ -448,7 +496,7 @@ locals()['visit_cmov' + name] = binary_insn locals()['visit_cmov' + name + 'l'] = binary_insn - def visit_andl(self, line): + def _visit_and(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -460,9 +508,7 @@ else: return self.binary_insn(line) - visit_and = visit_andl - - def visit_leal(self, line): + def _visit_lea(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -474,7 +520,7 @@ raise UnrecognizedOperation('epilogue without prologue') ofs_from_ebp = int(match.group(1) or '0') assert ofs_from_ebp <= 0 - framesize = 4 - ofs_from_ebp + framesize = self.WORD - ofs_from_ebp else: match = self.r_localvar_esp.match(source) # leal 12(%esp), %esp @@ -489,17 +535,23 @@ def insns_for_copy(self, source, target): source = self.replace_symbols(source) target = self.replace_symbols(target) - if source == self.ESP or target == self.ESP: + if target == self.ESP: raise UnrecognizedOperation('%s -> %s' % (source, target)) elif self.r_localvar.match(target): if self.r_localvar.match(source): + # eg, movl %eax, %ecx: possibly copies a GC root return [InsnCopyLocal(source, target)] else: + # eg, movl (%eax), %edi or mov %esp, %edi: load a register + # from "outside". If it contains a pointer to a GC root, + # it will be announced later with the GCROOT macro. return [InsnSetLocal(target, [source])] else: + # eg, movl %ebx, (%edx) or mov %ebp, %esp: does not write into + # a general register return [] - def visit_movl(self, line): + def _visit_mov(self, line): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -513,34 +565,24 @@ # gcc -fno-unit-at-a-time. 
return self.insns_for_copy(source, target) - visit_mov = visit_movl - - def visit_pushl(self, line): + def _visit_push(self, line): match = self.r_unaryinsn.match(line) source = match.group(1) - return [InsnStackAdjust(-4)] + self.insns_for_copy(source, self.TOP_OF_STACK) - - def visit_pushw(self, line): - return [InsnStackAdjust(-2)] # rare but not impossible + return [InsnStackAdjust(-self.WORD)] + self.insns_for_copy(source, self.TOP_OF_STACK) def _visit_pop(self, target): - return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+4)] - - def visit_popl(self, line): - match = self.r_unaryinsn.match(line) - target = match.group(1) - return self._visit_pop(target) + return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+self.WORD)] def _visit_prologue(self): # for the prologue of functions that use %ebp as frame pointer self.uses_frame_pointer = True self.r_localvar = self.r_localvarfp - return [InsnPrologue()] + return [InsnPrologue(self.WORD)] def _visit_epilogue(self): if not self.uses_frame_pointer: raise UnrecognizedOperation('epilogue without prologue') - return [InsnEpilogue(4)] + return [InsnEpilogue(self.WORD)] def visit_leave(self, line): return self._visit_epilogue() + self._visit_pop(self.EBP) @@ -662,7 +704,7 @@ visit_jc = conditional_jump visit_jnc = conditional_jump - def visit_xchgl(self, line): + def _visit_xchg(self, line): # only support the format used in VALGRIND_DISCARD_TRANSLATIONS # which is to use a marker no-op "xchgl %ebx, %ebx" match = self.r_binaryinsn.match(line) @@ -741,8 +783,172 @@ insns.append(InsnStackAdjust(16)) return insns + # __________ debugging output __________ + + @classmethod + def format_location(cls, loc): + # A 'location' is a single number describing where a value is stored + # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or + # in the stack frame at an address relative to either %esp or %ebp. + # The last two bits of the location number are used to tell the cases + # apart; see format_location(). + assert loc >= 0 + kind = loc & LOC_MASK + if kind == LOC_REG: + if loc == LOC_NOWHERE: + return '?' + reg = (loc >> 2) - 1 + return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") + else: + offset = loc & ~ LOC_MASK + if kind == LOC_EBP_PLUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + elif kind == LOC_EBP_MINUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + offset = -offset + elif kind == LOC_ESP_PLUS: + result = '(%' + cls.ESP.replace("%", "") + ')' + else: + assert 0, kind + if offset != 0: + result = str(offset) + result + return result + + @classmethod + def format_callshape(cls, shape): + # A 'call shape' is a tuple of locations in the sense of + # format_location(). They describe where in a function frame + # interesting values are stored, when this function executes a 'call' + # instruction. + # + # shape[0] is the location that stores the fn's own return + # address (not the return address for the currently + # executing 'call') + # + # shape[1..N] is where the fn saved its own caller's value of a + # certain callee save register. (where N is the number + # of callee save registers.) 
+ # + # shape[>N] are GC roots: where the fn has put its local GCPTR + # vars + # + num_callee_save_regs = len(cls.CALLEE_SAVE_REGISTERS) + assert isinstance(shape, tuple) + # + 1 for the return address + assert len(shape) >= (num_callee_save_regs + 1) + result = [cls.format_location(loc) for loc in shape] + return '{%s | %s | %s}' % (result[0], + ', '.join(result[1:(num_callee_save_regs+1)]), + ', '.join(result[(num_callee_save_regs+1):])) + + +class FunctionGcRootTracker32(FunctionGcRootTracker): + WORD = 4 + + visit_mov = FunctionGcRootTracker._visit_mov + visit_movl = FunctionGcRootTracker._visit_mov + visit_pushl = FunctionGcRootTracker._visit_push + visit_leal = FunctionGcRootTracker._visit_lea + + visit_addl = FunctionGcRootTracker._visit_add + visit_subl = FunctionGcRootTracker._visit_sub + visit_andl = FunctionGcRootTracker._visit_and + visit_and = FunctionGcRootTracker._visit_and + + visit_xchgl = FunctionGcRootTracker._visit_xchg + + # used in "xor reg, reg" to create a NULL GC ptr + visit_xorl = FunctionGcRootTracker.binary_insn + visit_orl = FunctionGcRootTracker.binary_insn # unsure about this one + + # occasionally used on 32-bits to move floats around + visit_movq = FunctionGcRootTracker.visit_nop + + def visit_pushw(self, line): + return [InsnStackAdjust(-2)] # rare but not impossible -class ElfFunctionGcRootTracker(FunctionGcRootTracker): + def visit_popl(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + +class FunctionGcRootTracker64(FunctionGcRootTracker): + WORD = 8 + + # Regex ignores destination + r_save_xmm_register = re.compile(r"\tmovaps\s+%xmm(\d+)") + + def _maybe_32bit_dest(func): + def wrapper(self, line): + # Using a 32-bit reg as a destination in 64-bit mode zero-extends + # to 64-bits, so sometimes gcc uses a 32-bit operation to copy a + # statically known pointer to a register + + # %eax -> %rax + new_line = re.sub(r"%e(ax|bx|cx|dx|di|si)$", r"%r\1", line) + # %r10d -> %r10 + new_line = re.sub(r"%r(\d+)d$", r"%r\1", new_line) + return func(self, new_line) + return wrapper + + visit_addl = FunctionGcRootTracker.visit_nop + visit_subl = FunctionGcRootTracker.visit_nop + visit_leal = FunctionGcRootTracker.visit_nop + + visit_cltq = FunctionGcRootTracker.visit_nop + + visit_movq = FunctionGcRootTracker._visit_mov + # just a special assembler mnemonic for mov + visit_movabsq = FunctionGcRootTracker._visit_mov + visit_mov = _maybe_32bit_dest(FunctionGcRootTracker._visit_mov) + visit_movl = visit_mov + + visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) + + visit_pushq = FunctionGcRootTracker._visit_push + + visit_addq = FunctionGcRootTracker._visit_add + visit_subq = FunctionGcRootTracker._visit_sub + + visit_leaq = FunctionGcRootTracker._visit_lea + + visit_xorq = FunctionGcRootTracker.binary_insn + + # FIXME: similar to visit_popl for 32-bit + def visit_popq(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + + def visit_jmp(self, line): + # On 64-bit, %al is used when calling varargs functions to specify an + # upper-bound on the number of xmm registers used in the call. 
gcc + # uses %al to compute an indirect jump that looks like: + # + # jmp *[some register] + # movaps %xmm7, [stack location] + # movaps %xmm6, [stack location] + # movaps %xmm5, [stack location] + # movaps %xmm4, [stack location] + # movaps %xmm3, [stack location] + # movaps %xmm2, [stack location] + # movaps %xmm1, [stack location] + # movaps %xmm0, [stack location] + # + # The jmp is always to somewhere in the block of "movaps" + # instructions, according to how many xmm registers need to be saved + # to the stack. The point of all this is that we can safely ignore + # jmp instructions of that form. + if (self.currentlineno + 8) < len(self.lines) and self.r_unaryinsn_star.match(line): + matches = [self.r_save_xmm_register.match(self.lines[self.currentlineno + 1 + i]) for i in range(8)] + if all(m and int(m.group(1)) == (7 - i) for i, m in enumerate(matches)): + return [] + + return FunctionGcRootTracker.visit_jmp(self, line) + + + +class ElfFunctionGcRootTracker32(FunctionGcRootTracker32): format = 'elf' ESP = '%esp' @@ -791,7 +997,65 @@ match = self.r_functionend.match(lines[-1]) assert funcname == match.group(1) assert funcname == match.group(2) - super(ElfFunctionGcRootTracker, self).__init__( + super(ElfFunctionGcRootTracker32, self).__init__( + funcname, lines, filetag) + + def extract_immediate(self, value): + if not value.startswith('$'): + return None + return int(value[1:]) + +ElfFunctionGcRootTracker32.init_regexp() + +class ElfFunctionGcRootTracker64(FunctionGcRootTracker64): + format = 'elf64' + ESP = '%rsp' + EBP = '%rbp' + EAX = '%rax' + CALLEE_SAVE_REGISTERS = ['%rbx', '%r12', '%r13', '%r14', '%r15', '%rbp'] + REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) + for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) + OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + OFFSET_LABELS = 2**30 + TOP_OF_STACK = '0(%rsp)' + + r_functionstart = re.compile(r"\t.type\s+"+LABEL+",\s*[@]function\s*$") + r_functionend = re.compile(r"\t.size\s+"+LABEL+",\s*[.]-"+LABEL+"\s*$") + LOCALVAR = r"%rax|%rbx|%rcx|%rdx|%rdi|%rsi|%rbp|%r8|%r9|%r10|%r11|%r12|%r13|%r14|%r15|-?\d*[(]%rsp[)]" + LOCALVARFP = LOCALVAR + r"|-?\d*[(]%rbp[)]" + r_localvarnofp = re.compile(LOCALVAR) + r_localvarfp = re.compile(LOCALVARFP) + r_localvar_esp = re.compile(r"(-?\d*)[(]%rsp[)]") + r_localvar_ebp = re.compile(r"(-?\d*)[(]%rbp[)]") + + r_rel_label = re.compile(r"(\d+):\s*$") + r_jump_rel_label = re.compile(r"\tj\w+\s+"+"(\d+)f"+"\s*$") + + r_unaryinsn_star= re.compile(r"\t[a-z]\w*\s+[*]("+OPERAND+")\s*$") + r_jmptable_item = re.compile(r"\t.quad\t"+LABEL+"(-\"[A-Za-z0-9$]+\")?\s*$") + r_jmptable_end = re.compile(r"\t.text|\t.section\s+.text|\t\.align|"+LABEL) + + r_gcroot_marker = re.compile(r"\t/[*] GCROOT ("+LOCALVARFP+") [*]/") + r_gcnocollect_marker = re.compile(r"\t/[*] GC_NOCOLLECT ("+OPERAND+") [*]/") + r_bottom_marker = re.compile(r"\t/[*] GC_STACK_BOTTOM [*]/") + + FUNCTIONS_NOT_RETURNING = { + 'abort': None, + '_exit': None, + '__assert_fail': None, + '___assert_rtn': None, + 'L___assert_rtn$stub': None, + 'L___eprintf$stub': None, + } + + def __init__(self, lines, filetag=0): + match = self.r_functionstart.match(lines[0]) + funcname = match.group(1) + match = self.r_functionend.match(lines[-1]) + assert funcname == match.group(1) + assert funcname == match.group(2) + super(ElfFunctionGcRootTracker64, self).__init__( funcname, lines, filetag) def extract_immediate(self, value): @@ -799,9 +1063,9 @@ return None return int(value[1:]) 
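# Quick illustration of the register renaming performed by the
# _maybe_32bit_dest helper above (editor's sketch, not part of the
# committed patch): gcc may write to a 32-bit destination register on
# x86-64 because the write zero-extends to 64 bits, so the tracker first
# canonicalizes the operand name, e.g.
#   re.sub(r"%e(ax|bx|cx|dx|di|si)$", r"%r\1", "movl $0, %eax")
#       -> "movl $0, %rax"
#   re.sub(r"%r(\d+)d$", r"%r\1", "movl $1, %r10d")
#       -> "movl $1, %r10"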
-ElfFunctionGcRootTracker.init_regexp() +ElfFunctionGcRootTracker64.init_regexp() -class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker): +class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker32): format = 'darwin' r_functionstart = re.compile(r"_(\w+):\s*$") @@ -810,7 +1074,7 @@ def __init__(self, lines, filetag=0): match = self.r_functionstart.match(lines[0]) funcname = '_' + match.group(1) - FunctionGcRootTracker.__init__(self, funcname, lines, filetag) + FunctionGcRootTracker32.__init__(self, funcname, lines, filetag) class Mingw32FunctionGcRootTracker(DarwinFunctionGcRootTracker): format = 'mingw32' @@ -821,7 +1085,7 @@ '__assert': None, } -class MsvcFunctionGcRootTracker(FunctionGcRootTracker): +class MsvcFunctionGcRootTracker(FunctionGcRootTracker32): format = 'msvc' ESP = 'esp' EBP = 'ebp' @@ -906,12 +1170,12 @@ push pop mov lea xor sub add '''.split(): - locals()['visit_' + name] = getattr(FunctionGcRootTracker, + locals()['visit_' + name] = getattr(FunctionGcRootTracker32, 'visit_' + name + 'l') - visit_int = FunctionGcRootTracker.visit_nop + visit_int = FunctionGcRootTracker32.visit_nop # probably not GC pointers - visit_cdq = FunctionGcRootTracker.visit_nop + visit_cdq = FunctionGcRootTracker32.visit_nop def visit_npad(self, line): # MASM has a nasty bug: it implements "npad 5" with "add eax, 0" @@ -1038,7 +1302,7 @@ table = tracker.computegcmaptable(self.verbose) if self.verbose > 1: for label, state in table: - print >> sys.stderr, label, '\t', format_callshape(state) + print >> sys.stderr, label, '\t', tracker.format_callshape(state) table = compress_gcmaptable(table) if self.shuffle and random.random() < 0.5: self.gcmaptable[:0] = table @@ -1049,7 +1313,7 @@ class ElfAssemblerParser(AssemblerParser): format = "elf" - FunctionGcRootTracker = ElfFunctionGcRootTracker + FunctionGcRootTracker = ElfFunctionGcRootTracker32 def find_functions(self, iterlines): functionlines = [] @@ -1072,6 +1336,10 @@ "missed the end of the previous function") yield False, functionlines +class ElfAssemblerParser64(ElfAssemblerParser): + format = "elf64" + FunctionGcRootTracker = ElfFunctionGcRootTracker64 + class DarwinAssemblerParser(AssemblerParser): format = "darwin" FunctionGcRootTracker = DarwinFunctionGcRootTracker @@ -1241,6 +1509,7 @@ PARSERS = { 'elf': ElfAssemblerParser, + 'elf64': ElfAssemblerParser64, 'darwin': DarwinAssemblerParser, 'mingw32': Mingw32AssemblerParser, 'msvc': MsvcAssemblerParser, @@ -1281,6 +1550,13 @@ txt = kwargs[self.format] print >> output, "\t%s" % txt + if self.format == 'elf64': + word_decl = '.quad' + else: + word_decl = '.long' + + tracker_cls = PARSERS[self.format].FunctionGcRootTracker + # The pypy_asm_stackwalk() function if self.format == 'msvc': @@ -1327,7 +1603,56 @@ } } """ + elif self.format == 'elf64': + print >> output, "\t.text" + print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') + print >> output, "\t.type pypy_asm_stackwalk, @function" + print >> output, "%s:" % _globalname('pypy_asm_stackwalk') + + print >> output, """\ + /* See description in asmgcroot.py */ + movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ + movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ + movq\t%rsp, %rax\t/* my frame top address */ + pushq\t%rax\t\t/* ASM_FRAMEDATA[8] */ + pushq\t%rbp\t\t/* ASM_FRAMEDATA[7] */ + pushq\t%r15\t\t/* ASM_FRAMEDATA[6] */ + pushq\t%r14\t\t/* ASM_FRAMEDATA[5] */ + pushq\t%r13\t\t/* ASM_FRAMEDATA[4] */ + pushq\t%r12\t\t/* ASM_FRAMEDATA[3] */ + pushq\t%rbx\t\t/* ASM_FRAMEDATA[2] */ + + /* Add this 
ASM_FRAMEDATA to the front of the circular linked */ + /* list. Let's call it 'self'. */ + movq\t8(%rcx), %rax\t/* next = gcrootanchor->next */ + pushq\t%rax\t\t\t\t/* self->next = next */ + pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ + movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ + movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ + + /* note: the Mac OS X 16 bytes aligment must be respected. */ + call\t*%rdx\t\t/* invoke the callback */ + + /* Detach this ASM_FRAMEDATA from the circular linked list */ + popq\t%rsi\t\t/* prev = self->prev */ + popq\t%rdi\t\t/* next = self->next */ + movq\t%rdi, 8(%rsi)\t/* prev->next = next */ + movq\t%rsi, 0(%rdi)\t/* next->prev = prev */ + + popq\t%rbx\t\t/* restore from ASM_FRAMEDATA[2] */ + popq\t%r12\t\t/* restore from ASM_FRAMEDATA[3] */ + popq\t%r13\t\t/* restore from ASM_FRAMEDATA[4] */ + popq\t%r14\t\t/* restore from ASM_FRAMEDATA[5] */ + popq\t%r15\t\t/* restore from ASM_FRAMEDATA[6] */ + popq\t%rbp\t\t/* restore from ASM_FRAMEDATA[7] */ + popq\t%rcx\t\t/* ignored ASM_FRAMEDATA[8] */ + + /* the return value is the one of the 'call' above, */ + /* because %rax (and possibly %rdx) are unmodified */ + ret + .size pypy_asm_stackwalk, .-pypy_asm_stackwalk + """ else: print >> output, "\t.text" print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') @@ -1401,7 +1726,7 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t%s,\t/* %s */\n' % ( ', '.join(bytes), shapeofs)) @@ -1433,17 +1758,18 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t/*%d*/\t.byte\t%s\n' % ( shapeofs, ', '.join(bytes))) shapeofs += len(bytes) if is_range: n = ~ n - print >> output, '\t.long\t%s-%d' % ( + print >> output, '\t%s\t%s-%d' % ( + word_decl, label, - PARSERS[self.format].FunctionGcRootTracker.OFFSET_LABELS) - print >> output, '\t.long\t%d' % (n,) + tracker_cls.OFFSET_LABELS) + print >> output, '\t%s\t%d' % (word_decl, n) print >> output, """\ .globl __gcmapend @@ -1451,6 +1777,7 @@ """.replace("__gcmapend", _globalname("__gcmapend")) _variant(elf='.section\t.rodata', + elf64='.section\t.rodata', darwin='.const', mingw32='') @@ -1483,56 +1810,6 @@ pass -# __________ debugging output __________ - -def format_location(loc): - # A 'location' is a single number describing where a value is stored - # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or - # in the stack frame at an address relative to either %esp or %ebp. - # The last two bits of the location number are used to tell the cases - # apart; see format_location(). - assert loc >= 0 - kind = loc & LOC_MASK - if kind == LOC_REG: - if loc == LOC_NOWHERE: - return '?' - reg = (loc >> 2) - 1 - return ElfFunctionGcRootTracker.CALLEE_SAVE_REGISTERS[reg] - else: - offset = loc & ~ LOC_MASK - if kind == LOC_EBP_PLUS: - result = '(%ebp)' - elif kind == LOC_EBP_MINUS: - result = '(%ebp)' - offset = -offset - elif kind == LOC_ESP_PLUS: - result = '(%esp)' - else: - assert 0, kind - if offset != 0: - result = str(offset) + result - return result - -def format_callshape(shape): - # A 'call shape' is a tuple of locations in the sense of format_location(). - # They describe where in a function frame interesting values are stored, - # when this function executes a 'call' instruction. 
- # - # shape[0] is the location that stores the fn's own return address - # (not the return address for the currently executing 'call') - # shape[1] is where the fn saved its own caller's %ebx value - # shape[2] is where the fn saved its own caller's %esi value - # shape[3] is where the fn saved its own caller's %edi value - # shape[4] is where the fn saved its own caller's %ebp value - # shape[>=5] are GC roots: where the fn has put its local GCPTR vars - # - assert isinstance(shape, tuple) - assert len(shape) >= 5 - result = [format_location(loc) for loc in shape] - return '{%s | %s | %s}' % (result[0], - ', '.join(result[1:5]), - ', '.join(result[5:])) - # __________ table compression __________ def compress_gcmaptable(table): @@ -1559,49 +1836,6 @@ yield (label1, state, is_range) i = j -def compress_callshape(shape): - # For a single shape, this turns the list of integers into a list of - # bytes and reverses the order of the entries. The length is - # encoded by inserting a 0 marker after the gc roots coming from - # shape[5:] and before the 5 values coming from shape[4] to - # shape[0]. In practice it seems that shapes contain many integers - # whose value is up to a few thousands, which the algorithm below - # compresses down to 2 bytes. Very small values compress down to a - # single byte. - assert len(shape) >= 5 - shape = list(shape) - assert 0 not in shape[5:] - shape.insert(5, 0) - result = [] - for loc in shape: - assert loc >= 0 - flag = 0 - while loc >= 0x80: - result.append(int(loc & 0x7F) | flag) - flag = 0x80 - loc >>= 7 - result.append(int(loc) | flag) - result.reverse() - return result - -def decompress_callshape(bytes): - # For tests. This logic is copied in asmgcroot.py. - result = [] - n = 0 - while n < len(bytes): - value = 0 - while True: - b = bytes[n] - n += 1 - value += b - if b < 0x80: - break - value = (value - 0x80) << 7 - result.append(value) - result.reverse() - assert result[5] == 0 - del result[5] - return result def getidentifier(s): def mapchar(c): @@ -1626,7 +1860,10 @@ elif sys.platform == 'win32': format = 'mingw32' else: - format = 'elf' + if sys.maxint > 2147483647: + format = 'elf64' + else: + format = 'elf' entrypoint = 'main' while len(sys.argv) > 1: if sys.argv[1] == '-v': Modified: pypy/branch/fast-forward/pypy/translator/c/genc.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/genc.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/genc.py Thu Sep 9 01:00:13 2010 @@ -1,7 +1,6 @@ import autopath import py import sys, os -from pypy.translator.c.node import PyObjectNode, FuncNode from pypy.translator.c.database import LowLevelDatabase from pypy.translator.c.extfunc import pre_include_code_lines from pypy.translator.llsupport.wrapper import new_wrapper @@ -196,7 +195,7 @@ all = [] for node in self.db.globalcontainers(): - eci = getattr(node, 'compilation_info', None) + eci = node.compilation_info() if eci: all.append(eci) self.merge_eci(*all) @@ -222,7 +221,7 @@ graphs = db.all_graphs() db.gctransformer.prepare_inline_helpers(graphs) for node in db.containerlist: - if isinstance(node, FuncNode): + if hasattr(node, 'funcgens'): for funcgen in node.funcgens: funcgen.patch_graph(copy_graph=False) return db Modified: pypy/branch/fast-forward/pypy/translator/c/node.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/node.py (original) +++ 
pypy/branch/fast-forward/pypy/translator/c/node.py Thu Sep 9 01:00:13 2010 @@ -77,6 +77,8 @@ if db.gcpolicy.need_no_typeptr(): assert self.fieldnames == ('typeptr',) self.fieldnames = () + # + self.fulltypename = '%s %s @' % (self.typetag, self.name) def setup(self): # this computes self.fields @@ -119,7 +121,7 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def c_struct_field_name(self, name): # occasionally overridden in __init__(): @@ -211,6 +213,8 @@ self.name) = db.namespace.uniquename(basename, with_number=with_number, bare=True) self.dependencies = {} + self.fulltypename = '%s %s @' % (self.typetag, self.name) + self.fullptrtypename = '%s %s *@' % (self.typetag, self.name) def setup(self): if hasattr(self, 'itemtypename'): @@ -236,10 +240,10 @@ gcinfo = defaultproperty(computegcinfo) def gettype(self): - return '%s %s @' % (self.typetag, self.name) + return self.fulltypename def getptrtype(self): - return '%s %s *@' % (self.typetag, self.name) + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s.items[%s]' % (baseexpr, index) @@ -336,16 +340,19 @@ if ARRAY._hints.get("render_as_void"): contained_type = Void self.itemtypename = db.gettype(contained_type, who_asks=self) + self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' % + (self.varlength,)) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Array loops are forbidden by ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - return self.itemtypename.replace('@', '(@)[%d]' % (self.varlength,)) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index): return '%s[%d]' % (baseexpr, index) @@ -383,17 +390,19 @@ self.LLTYPE = FIXEDARRAY self.dependencies = {} self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self) + self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' % + FIXEDARRAY.length) + self.fullptrtypename = self.itemtypename.replace('@', '*@') def setup(self): """Loops are forbidden by ForwardReference.become() because there is no way to declare them in C.""" def gettype(self): - FIXEDARRAY = self.FIXEDARRAY - return self.itemtypename.replace('@', '(@)[%d]' % FIXEDARRAY.length) + return self.fulltypename def getptrtype(self): - return self.itemtypename.replace('@', '*@') + return self.fullptrtypename def access_expr(self, baseexpr, index, dummy=False): if not isinstance(index, int): @@ -466,15 +475,15 @@ class ContainerNode(object): - if USESLOTS: - __slots__ = """db T obj + if USESLOTS: # keep the number of slots down! 
+ __slots__ = """db obj typename implementationtypename - name ptrname compilation_info + name globalcontainer""".split() + eci_name = '_compilation_info' def __init__(self, db, T, obj): self.db = db - self.T = T self.obj = obj #self.dependencies = {} self.typename = db.gettype(T) #, who_asks=self) @@ -489,16 +498,24 @@ else: self.globalcontainer = False parentnode = db.getcontainernode(parent) - defnode = db.gettypedefnode(parentnode.T) + defnode = db.gettypedefnode(parentnode.getTYPE()) self.name = defnode.access_expr(parentnode.name, parentindex) if self.typename != self.implementationtypename: if db.gettypedefnode(T).extra_union_for_varlength: self.name += '.b' - self.compilation_info = getattr(obj, '_compilation_info', None) - self.ptrname = '(&%s)' % self.name + + def getptrname(self): + return '(&%s)' % self.name + + def getTYPE(self): + return typeOf(self.obj) def is_thread_local(self): - return hasattr(self.T, "_hints") and self.T._hints.get('thread_local') + T = self.getTYPE() + return hasattr(T, "_hints") and T._hints.get('thread_local') + + def compilation_info(self): + return getattr(self.obj, self.eci_name, None) def get_declaration(self): if self.name[-2:] == '.b': @@ -546,27 +563,31 @@ __slots__ = () def basename(self): - return self.T._name + T = self.getTYPE() + return T._name def enum_dependencies(self): - for name in self.T._names: + T = self.getTYPE() + for name in T._names: yield getattr(self.obj, name) def getlength(self): - if self.T._arrayfld is None: + T = self.getTYPE() + if T._arrayfld is None: return 1 else: - array = getattr(self.obj, self.T._arrayfld) + array = getattr(self.obj, T._arrayfld) return len(array.items) def initializationexpr(self, decoration=''): + T = self.getTYPE() is_empty = True yield '{' - defnode = self.db.gettypedefnode(self.T) + defnode = self.db.gettypedefnode(T) data = [] - if needs_gcheader(self.T): + if needs_gcheader(T): gc_init = self.db.gcpolicy.struct_gcheader_initdata(self) data.append(('gcheader', gc_init)) @@ -578,16 +599,16 @@ # '.fieldname = value'. But here we don't know which of the # fields need initialization, so XXX we pick the first one # arbitrarily. 
- if hasattr(self.T, "_hints") and self.T._hints.get('union'): + if hasattr(T, "_hints") and T._hints.get('union'): data = data[0:1] - if 'get_padding_drop' in self.T._hints: + if 'get_padding_drop' in T._hints: d = {} for name, _ in data: - T = defnode.c_struct_field_type(name) - typename = self.db.gettype(T) + T1 = defnode.c_struct_field_type(name) + typename = self.db.gettype(T1) d[name] = cdecl(typename, '') - padding_drop = self.T._hints['get_padding_drop'](d) + padding_drop = T._hints['get_padding_drop'](d) else: padding_drop = [] @@ -617,9 +638,10 @@ return 'struct _hashT_%s @' % self.name def forward_declaration(self): + T = self.getTYPE() assert self.typename == self.implementationtypename # no array part hash_typename = self.get_hash_typename() - hash_offset = self.db.gctransformer.get_hash_offset(self.T) + hash_offset = self.db.gctransformer.get_hash_offset(T) yield '%s {' % cdecl(hash_typename, '') yield '\tunion {' yield '\t\t%s;' % cdecl(self.implementationtypename, 'head') @@ -656,10 +678,10 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if barebonearray(T): - self.ptrname = self.name + def getptrname(self): + if barebonearray(self.getTYPE()): + return self.name + return ContainerNode.getptrname(self) def basename(self): return 'array' @@ -671,22 +693,23 @@ return len(self.obj.items) def initializationexpr(self, decoration=''): - defnode = self.db.gettypedefnode(self.T) + T = self.getTYPE() + defnode = self.db.gettypedefnode(T) yield '{' - if needs_gcheader(self.T): + if needs_gcheader(T): gc_init = self.db.gcpolicy.array_gcheader_initdata(self) lines = generic_initializationexpr(self.db, gc_init, 'gcheader', '%sgcheader' % (decoration,)) for line in lines: yield line - if self.T._hints.get('nolength', False): + if T._hints.get('nolength', False): length = '' else: length = '%d, ' % len(self.obj.items) - if self.T.OF is Void or len(self.obj.items) == 0: + if T.OF is Void or len(self.obj.items) == 0: yield '\t%s' % length.rstrip(', ') yield '}' - elif self.T.OF == Char: + elif T.OF == Char: if len(self.obj.items) and self.obj.items[0] is None: s = ''.join([self.obj.getitem(i) for i in range(len(self.obj.items))]) else: @@ -694,7 +717,7 @@ yield '\t%s%s' % (length, c_char_array_constant(s)) yield '}' else: - barebone = barebonearray(self.T) + barebone = barebonearray(T) if not barebone: yield '\t%s{' % length for j in range(len(self.obj.items)): @@ -716,13 +739,14 @@ if USESLOTS: __slots__ = () - def __init__(self, db, T, obj): - ContainerNode.__init__(self, db, T, obj) - if not isinstance(obj, _subarray): # XXX hackish - self.ptrname = self.name + def getptrname(self): + if not isinstance(self.obj, _subarray): # XXX hackish + return self.name + return ContainerNode.getptrname(self) def basename(self): - return self.T._name + T = self.getTYPE() + return T._name def enum_dependencies(self): for i in range(self.obj.getlength()): @@ -732,11 +756,12 @@ return 1 # not variable-sized! def initializationexpr(self, decoration=''): + T = self.getTYPE() assert self.typename == self.implementationtypename # not var-sized is_empty = True yield '{' # _names == ['item0', 'item1', ...] 
- for j, name in enumerate(self.T._names): + for j, name in enumerate(T._names): value = getattr(self.obj, name) lines = generic_initializationexpr(self.db, value, '%s[%d]' % (self.name, j), @@ -777,6 +802,7 @@ class FuncNode(ContainerNode): nodekind = 'func' + eci_name = 'compilation_info' # there not so many node of this kind, slots should not # be necessary @@ -794,11 +820,12 @@ else: self.name = (forcename or db.namespace.uniquename('g_' + self.basename())) - self.compilation_info = getattr(obj, 'compilation_info', None) self.make_funcgens() #self.dependencies = {} self.typename = db.gettype(T) #, who_asks=self) - self.ptrname = self.name + + def getptrname(self): + return self.name def make_funcgens(self): self.funcgens = select_function_code_generators(self.obj, self.db, self.name) @@ -939,18 +966,20 @@ return [] def initializationexpr(self, decoration=''): - yield 'RPyOpaque_INITEXPR_%s' % (self.T.tag,) + T = self.getTYPE() + yield 'RPyOpaque_INITEXPR_%s' % (T.tag,) def startupcode(self): - args = [self.ptrname] + T = self.getTYPE() + args = [self.getptrname()] # XXX how to make this code more generic? - if self.T.tag == 'ThreadLock': + if T.tag == 'ThreadLock': lock = self.obj.externalobj if lock.locked(): args.append('1') else: args.append('0') - yield 'RPyOpaque_SETUP_%s(%s);' % (self.T.tag, ', '.join(args)) + yield 'RPyOpaque_SETUP_%s(%s);' % (T.tag, ', '.join(args)) def opaquenode_factory(db, T, obj): @@ -974,13 +1003,15 @@ self.obj = obj value = obj.value self.name = self._python_c_name(value) - self.ptrname = self.name self.exported_name = self.name # a list of expressions giving places where this constant PyObject # must be copied. Normally just in the global variable of the same # name, but see also StructNode.initializationexpr() :-( self.where_to_copy_me = [] + def getptrname(self): + return self.name + def _python_c_name(self, value): # just some minimal cases: None and builtin exceptions if value is None: Modified: pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py Thu Sep 9 01:00:13 2010 @@ -67,9 +67,8 @@ if not fullname.startswith('define'): continue keyword = conftest.option.keyword - if keyword: - if keyword.startswith('test_'): - keyword = keyword[len('test_'):] + if keyword.startswith('test_'): + keyword = keyword[len('test_'):] if keyword not in fullname: continue prefix, name = fullname.split('_', 1) @@ -1072,21 +1071,66 @@ should_be_moving = True GC_CAN_SHRINK_ARRAY = False - def setup_class(cls): - py.test.skip("Disabled for now") - def test_gc_set_max_heap_size(self): py.test.skip("not implemented") + def test_gc_heap_stats(self): + py.test.skip("not implemented") + def test_finalizer_order(self): py.test.skip("not implemented") + def define_adding_a_hash(cls): + from pypy.rlib.objectmodel import compute_identity_hash + S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) + S2 = lltype.GcStruct('S2', ('p1', lltype.Ptr(S1)), + ('p2', lltype.Ptr(S1)), + ('p3', lltype.Ptr(S1)), + ('p4', lltype.Ptr(S1)), + ('p5', lltype.Ptr(S1)), + ('p6', lltype.Ptr(S1)), + ('p7', lltype.Ptr(S1)), + ('p8', lltype.Ptr(S1)), + ('p9', lltype.Ptr(S1))) + def g(): + lltype.malloc(S1) # forgotten, will be shifted over + s2 = lltype.malloc(S2) # a big object, overlaps its old position + s2.p1 = lltype.malloc(S1); s2.p1.x = 1010 + s2.p2 = lltype.malloc(S1); 
s2.p2.x = 1020 + s2.p3 = lltype.malloc(S1); s2.p3.x = 1030 + s2.p4 = lltype.malloc(S1); s2.p4.x = 1040 + s2.p5 = lltype.malloc(S1); s2.p5.x = 1050 + s2.p6 = lltype.malloc(S1); s2.p6.x = 1060 + s2.p7 = lltype.malloc(S1); s2.p7.x = 1070 + s2.p8 = lltype.malloc(S1); s2.p8.x = 1080 + s2.p9 = lltype.malloc(S1); s2.p9.x = 1090 + return s2 + def f(): + rgc.collect() + s2 = g() + h2 = compute_identity_hash(s2) + rgc.collect() # shift s2 to the left, but add a hash field + assert s2.p1.x == 1010 + assert s2.p2.x == 1020 + assert s2.p3.x == 1030 + assert s2.p4.x == 1040 + assert s2.p5.x == 1050 + assert s2.p6.x == 1060 + assert s2.p7.x == 1070 + assert s2.p8.x == 1080 + assert s2.p9.x == 1090 + return h2 - compute_identity_hash(s2) + return f + + def test_adding_a_hash(self): + res = self.run("adding_a_hash") + assert res == 0 + # ____________________________________________________________________ -class TestHybridTaggedPointers(TestHybridGC): +class TaggedPointersTest(object): taggedpointers = True - def define_tagged(cls): class Unrelated(object): pass @@ -1129,3 +1173,10 @@ __slots__ = 'smallint' def meth(self, x): return self.smallint + x + 3 + + +class TestHybridTaggedPointers(TaggedPointersTest, TestHybridGC): + pass + +class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): + removetypeptr = True Modified: pypy/branch/fast-forward/pypy/translator/c/test/test_standalone.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/test/test_standalone.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/test/test_standalone.py Thu Sep 9 01:00:13 2010 @@ -604,6 +604,33 @@ out, err = cbuilder.cmdexec("a b") assert out == "3" + def test_gcc_options(self): + # check that the env var CC is correctly interpreted, even if + # it contains the compiler name followed by some options. + if sys.platform == 'win32': + py.test.skip("only for gcc") + + from pypy.rpython.lltypesystem import lltype, rffi + dir = udir.ensure('test_gcc_options', dir=1) + dir.join('someextraheader.h').write('#define someextrafunc() 42\n') + eci = ExternalCompilationInfo(includes=['someextraheader.h']) + someextrafunc = rffi.llexternal('someextrafunc', [], lltype.Signed, + compilation_info=eci) + + def entry_point(argv): + return someextrafunc() + + old_cc = os.environ.get('CC') + try: + os.environ['CC'] = 'gcc -I%s' % dir + t, cbuilder = self.compile(entry_point) + finally: + if old_cc is None: + del os.environ['CC'] + else: + os.environ['CC'] = old_cc + + class TestMaemo(TestStandalone): def setup_class(cls): py.test.skip("TestMaemo: tests skipped for now") Modified: pypy/branch/fast-forward/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/exceptiontransform.py (original) +++ pypy/branch/fast-forward/pypy/translator/exceptiontransform.py Thu Sep 9 01:00:13 2010 @@ -197,7 +197,7 @@ for graph in self.translator.graphs: self.create_exception_handling(graph) - def create_exception_handling(self, graph, always_exc_clear=False): + def create_exception_handling(self, graph): """After an exception in a direct_call (or indirect_call), that is not caught by an explicit except statement, we need to reraise the exception. 
So after this @@ -212,7 +212,6 @@ self.raise_analyzer.analyze_direct_call(graph) graph.exceptiontransformed = self.exc_data_ptr - self.always_exc_clear = always_exc_clear join_blocks(graph) # collect the blocks before changing them n_need_exc_matching_blocks = 0 @@ -455,13 +454,18 @@ block.recloseblock(l0, l) insert_zeroing_op = False - # XXX this is not right. it also inserts zero_gc_pointers_inside - # XXX on a path that malloc_nonmovable returns null, but does not raise - # XXX which might end up with a segfault. But we don't have such gc now - if spaceop.opname == 'malloc' or spaceop.opname == 'malloc_nonmovable': + if spaceop.opname == 'malloc': flavor = spaceop.args[1].value['flavor'] if flavor == 'gc': insert_zeroing_op = True + elif spaceop.opname == 'malloc_nonmovable': + # xxx we cannot insert zero_gc_pointers_inside after + # malloc_nonmovable, because it can return null. For now + # we simply always force the zero=True flag on + # malloc_nonmovable. + c_flags = spaceop.args[1] + c_flags.value = c_flags.value.copy() + spaceop.args[1].value['zero'] = True if insert_zeroing_op: if normalafterblock is None: @@ -479,16 +483,6 @@ [v_result_after], varoftype(lltype.Void))) - if self.always_exc_clear: - # insert code that clears the exception even in the non-exceptional - # case... this is a hint for the JIT, but pointless otherwise - if normalafterblock is None: - normalafterblock = insert_empty_block(None, l0) - llops = rtyper.LowLevelOpList(None) - self.gen_setfield('exc_value', self.c_null_evalue, llops) - self.gen_setfield('exc_type', self.c_null_etype, llops) - normalafterblock.operations[:0] = llops - class LLTypeExceptionTransformer(BaseExceptionTransformer): Modified: pypy/branch/fast-forward/pypy/translator/goal/app_main.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/goal/app_main.py (original) +++ pypy/branch/fast-forward/pypy/translator/goal/app_main.py Thu Sep 9 01:00:13 2010 @@ -223,7 +223,6 @@ path = os.getenv('PYTHONPATH') if path: newpath = path.split(os.pathsep) + newpath - newpath.insert(0, '') # remove duplicates _seen = {} del sys.path[:] @@ -349,8 +348,8 @@ else: raise CommandLineError('unrecognized option %r' % (arg,)) i += 1 - sys.argv = argv[i:] - if not sys.argv: + sys.argv[:] = argv[i:] # don't change the list that sys.argv is bound to + if not sys.argv: # (relevant in case of "reload(sys)") sys.argv.append('') options["run_stdin"] = True if print_sys_flags: @@ -393,6 +392,10 @@ except: print >> sys.stderr, "'import site' failed" + # update sys.path *after* loading site.py, in case there is a + # "site.py" file in the script's directory. + sys.path.insert(0, '') + if warnoptions: sys.warnoptions.append(warnoptions) from warnings import _processoptions @@ -541,6 +544,10 @@ reset.append(('PYTHONINSPECT', os.environ.get('PYTHONINSPECT', ''))) os.environ['PYTHONINSPECT'] = os.environ['PYTHONINSPECT_'] + # no one should change to which lists sys.argv and sys.path are bound + old_argv = sys.argv + old_path = sys.path + from pypy.module.sys.version import PYPY_VERSION sys.pypy_version_info = PYPY_VERSION sys.pypy_initial_path = pypy_initial_path @@ -553,3 +560,5 @@ sys.ps1 = '>>> ' # restore the normal ones, in case sys.ps2 = '... 
' # we are dropping to CPython's prompt import os; os.environ.update(reset) + assert old_argv is sys.argv + assert old_path is sys.path Modified: pypy/branch/fast-forward/pypy/translator/goal/test2/test_app_main.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/goal/test2/test_app_main.py (original) +++ pypy/branch/fast-forward/pypy/translator/goal/test2/test_app_main.py Thu Sep 9 01:00:13 2010 @@ -1,10 +1,12 @@ """ Tests for the entry point of pypy-c, app_main.py. """ +from __future__ import with_statement import py import sys, os, re import autopath from pypy.tool.udir import udir +from contextlib import contextmanager banner = sys.version.splitlines()[0] @@ -326,8 +328,9 @@ class TestNonInteractive: def run(self, cmdline, senddata='', expect_prompt=False, - expect_banner=False): - cmdline = '%s "%s" %s' % (sys.executable, app_main, cmdline) + expect_banner=False, python_flags=''): + cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, + app_main, cmdline) print 'POPEN:', cmdline child_in, child_out_err = os.popen4(cmdline) child_in.write(senddata) @@ -475,6 +478,43 @@ assert data == '\x00(STDOUT)\n\x00' # from stdout child_out_err.close() + def test_proper_sys_path(self, tmpdir): + + @contextmanager + def chdir_and_unset_pythonpath(new_cwd): + old_cwd = new_cwd.chdir() + old_pythonpath = os.getenv('PYTHONPATH') + os.unsetenv('PYTHONPATH') + try: + yield + finally: + old_cwd.chdir() + os.putenv('PYTHONPATH', old_pythonpath) + + tmpdir.join('site.py').write('print "SHOULD NOT RUN"') + runme_py = tmpdir.join('runme.py') + runme_py.write('print "some text"') + + cmdline = str(runme_py) + + with chdir_and_unset_pythonpath(tmpdir): + data = self.run(cmdline, python_flags='-S') + + assert data == "some text\n" + + runme2_py = tmpdir.mkdir('otherpath').join('runme2.py') + runme2_py.write('print "some new text"\n' + 'import sys\n' + 'print sys.path\n') + + cmdline2 = str(runme2_py) + + with chdir_and_unset_pythonpath(tmpdir): + data = self.run(cmdline2, python_flags='-S') + + assert data.startswith("some new text\n") + assert repr(str(tmpdir.join('otherpath'))) in data + class AppTestAppMain: Modified: pypy/branch/fast-forward/pypy/translator/goal/translate.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/goal/translate.py (original) +++ pypy/branch/fast-forward/pypy/translator/goal/translate.py Thu Sep 9 01:00:13 2010 @@ -18,7 +18,7 @@ ArbitraryOption, StrOption, IntOption, Config, \ ChoiceOption, OptHelpFormatter from pypy.config.translationoption import get_combined_translation_config -from pypy.config.translationoption import set_opt_level +from pypy.config.translationoption import set_opt_level, final_check_config from pypy.config.translationoption import OPT_LEVELS, DEFAULT_OPT_LEVEL from pypy.config.translationoption import PLATFORMS, set_platform @@ -175,6 +175,9 @@ if 'handle_config' in targetspec_dic: targetspec_dic['handle_config'](config, translateconfig) + # perform checks (if any) on the final config + final_check_config(config) + if translateconfig.help: opt_parser.print_help() if 'print_help' in targetspec_dic: Modified: pypy/branch/fast-forward/pypy/translator/platform/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/__init__.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/__init__.py Thu Sep 9 01:00:13 
2010 @@ -38,9 +38,9 @@ name = "abstract platform" c_environ = None - relevant_environ = [] + relevant_environ = () - so_prefixes = [''] + so_prefixes = ('',) def __init__(self, cc): if self.__class__ is Platform: @@ -99,15 +99,20 @@ self.__dict__ == other.__dict__) def key(self): - bits = [self.__class__.__name__, 'cc=%s' % self.cc] + bits = [self.__class__.__name__, 'cc=%r' % self.cc] for varname in self.relevant_environ: - bits.append('%s=%s' % (varname, os.environ.get(varname))) + bits.append('%s=%r' % (varname, os.environ.get(varname))) return ' '.join(bits) # some helpers which seem to be cross-platform enough def _execute_c_compiler(self, cc, args, outname, cwd=None): log.execute(cc + ' ' + ' '.join(args)) + # 'cc' can also contain some options for the C compiler; + # e.g. it can be "gcc -m32". We handle it by splitting on ' '. + cclist = cc.split() + cc = cclist[0] + args = cclist[1:] + args returncode, stdout, stderr = _run_subprocess(cc, args, self.c_environ, cwd) self._handle_error(returncode, stderr, stdout, outname) @@ -146,7 +151,7 @@ extra = self.standalone_only else: extra = self.shared_only - cflags = self.cflags + extra + cflags = list(self.cflags) + list(extra) return (cflags + list(eci.compile_extra) + args) def _preprocess_library_dirs(self, library_dirs): @@ -158,10 +163,10 @@ libraries = self._libs(eci.libraries) link_files = self._linkfiles(eci.link_files) export_flags = self._exportsymbols_link_flags(eci) - return (library_dirs + self.link_flags + export_flags + + return (library_dirs + list(self.link_flags) + export_flags + link_files + list(eci.link_extra) + libraries) - def _exportsymbols_link_flags(self, eci): + def _exportsymbols_link_flags(self, eci, relto=None): if eci.export_symbols: raise ValueError("This platform does not support export symbols") return [] Modified: pypy/branch/fast-forward/pypy/translator/platform/darwin.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/darwin.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/darwin.py Thu Sep 9 01:00:13 2010 @@ -5,10 +5,10 @@ class Darwin(posix.BasePosix): name = "darwin" - link_flags = ['-mmacosx-version-min=10.4'] - cflags = ['-O3', '-fomit-frame-pointer', '-mmacosx-version-min=10.4'] - standalone_only = ['-mdynamic-no-pic'] - shared_only = [] + link_flags = ('-mmacosx-version-min=10.4',) + cflags = ('-O3', '-fomit-frame-pointer', '-mmacosx-version-min=10.4') + standalone_only = ('-mdynamic-no-pic',) + shared_only = () so_ext = 'so' @@ -18,8 +18,9 @@ self.cc = cc def _args_for_shared(self, args): - return (self.shared_only + ['-dynamiclib', '-undefined', 'dynamic_lookup'] - + args) + return (list(self.shared_only) + + ['-dynamiclib', '-undefined', 'dynamic_lookup'] + + args) def _preprocess_include_dirs(self, include_dirs): res_incl_dirs = list(include_dirs) @@ -56,7 +57,7 @@ include_dirs = self._includedirs(eci.include_dirs) return (args + frameworks + include_dirs) - def _exportsymbols_link_flags(self, eci): + def _exportsymbols_link_flags(self, eci, relto=None): if not eci.export_symbols: return [] @@ -65,15 +66,19 @@ for sym in eci.export_symbols: f.write("_%s\n" % (sym,)) f.close() + + if relto: + response_file = relto.bestrelpath(response_file) return ["-Wl,-exported_symbols_list,%s" % (response_file,)] class Darwin_i386(Darwin): name = "darwin_i386" - link_flags = ['-arch', 'i386', '-mmacosx-version-min=10.4'] - cflags = ['-arch', 'i386', '-O3', '-fomit-frame-pointer', 
'-mmacosx-version-min=10.4'] + link_flags = ('-arch', 'i386', '-mmacosx-version-min=10.4') + cflags = ('-arch', 'i386', '-O3', '-fomit-frame-pointer', + '-mmacosx-version-min=10.4') class Darwin_x86_64(Darwin): name = "darwin_x86_64" - link_flags = ['-arch', 'x86_64', '-mmacosx-version-min=10.4'] - cflags = ['-arch', 'x86_64', '-O3', '-fomit-frame-pointer', '-mmacosx-version-min=10.4'] - + link_flags = ('-arch', 'x86_64', '-mmacosx-version-min=10.4') + cflags = ('-arch', 'x86_64', '-O3', '-fomit-frame-pointer', + '-mmacosx-version-min=10.4') Modified: pypy/branch/fast-forward/pypy/translator/platform/freebsd7.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/freebsd7.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/freebsd7.py Thu Sep 9 01:00:13 2010 @@ -5,10 +5,10 @@ class Freebsd7(posix.BasePosix): name = "freebsd7" - link_flags = ['-pthread'] - cflags = ['-O3', '-pthread', '-fomit-frame-pointer'] - standalone_only = [] - shared_only = [] + link_flags = ('-pthread',) + cflags = ('-O3', '-pthread', '-fomit-frame-pointer') + standalone_only = () + shared_only = () so_ext = 'so' make_cmd = 'gmake' @@ -32,4 +32,4 @@ return ['/usr/local/lib'] class Freebsd7_64(Freebsd7): - shared_only = ['-fPIC'] + shared_only = ('-fPIC',) Modified: pypy/branch/fast-forward/pypy/translator/platform/linux.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/linux.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/linux.py Thu Sep 9 01:00:13 2010 @@ -3,15 +3,16 @@ from pypy.translator.platform import _run_subprocess from pypy.translator.platform.posix import BasePosix -class Linux(BasePosix): +class BaseLinux(BasePosix): name = "linux" - link_flags = ['-pthread', '-lrt'] - cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-Wall', '-Wno-unused'] - standalone_only = [] - shared_only = ['-fPIC'] + link_flags = ('-pthread', '-lrt') + cflags = ('-O3', '-pthread', '-fomit-frame-pointer', + '-Wall', '-Wno-unused') + standalone_only = () + shared_only = ('-fPIC',) so_ext = 'so' - so_prefixes = ['lib', ''] + so_prefixes = ('lib', '') def _args_for_shared(self, args): return ['-shared'] + args @@ -24,10 +25,12 @@ return self._pkg_config("libffi", "--libs-only-L", ['/usr/lib/libffi']) + +class Linux(BaseLinux): def library_dirs_for_libffi_a(self): # places where we need to look for libffi.a return self.library_dirs_for_libffi() + ['/usr/lib'] -class Linux64(Linux): - shared_only = ['-fPIC'] +class Linux64(BaseLinux): + pass Modified: pypy/branch/fast-forward/pypy/translator/platform/maemo.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/maemo.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/maemo.py Thu Sep 9 01:00:13 2010 @@ -13,7 +13,7 @@ class Maemo(Linux): name = "maemo" - available_includedirs = ['/usr/include', '/tmp'] + available_includedirs = ('/usr/include', '/tmp') copied_cache = {} def _invent_new_name(self, basepath, base): Modified: pypy/branch/fast-forward/pypy/translator/platform/posix.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/posix.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/posix.py Thu Sep 9 01:00:13 2010 @@ -4,17 +4,20 @@ from pypy.translator.platform import Platform, log, 
_run_subprocess from pypy.tool import autopath -import py, os +import py, os, sys class BasePosix(Platform): exe_ext = '' make_cmd = 'make' - relevant_environ=['CPATH', 'LIBRARY_PATH', 'C_INCLUDE_PATH'] + relevant_environ = ('CPATH', 'LIBRARY_PATH', 'C_INCLUDE_PATH') def __init__(self, cc=None): if cc is None: - cc = 'gcc' + try: + cc = os.environ['CC'] + except KeyError: + cc = 'gcc' self.cc = cc def _libs(self, libraries): @@ -39,7 +42,7 @@ def _link_args_from_eci(self, eci, standalone): return Platform._link_args_from_eci(self, eci, standalone) - def _exportsymbols_link_flags(self, eci): + def _exportsymbols_link_flags(self, eci, relto=None): if not eci.export_symbols: return [] @@ -50,6 +53,9 @@ f.write("%s;\n" % (sym,)) f.write("};") f.close() + + if relto: + response_file = relto.bestrelpath(response_file) return ["-Wl,--export-dynamic,--version-script=%s" % (response_file,)] def _link(self, cc, ofiles, link_args, standalone, exe_name): @@ -86,11 +92,11 @@ else: exe_name = exe_name.new(ext=self.exe_ext) - linkflags = self.link_flags[:] + linkflags = list(self.link_flags) if shared: linkflags = self._args_for_shared(linkflags) - linkflags += self._exportsymbols_link_flags(eci) + linkflags += self._exportsymbols_link_flags(eci, relto=path) if shared: libname = exe_name.new(ext='').basename @@ -98,6 +104,13 @@ else: target_name = exe_name.basename + cflags = self.cflags + if sys.maxint > 2147483647: # XXX XXX XXX sort this out + if shared: + cflags = self.cflags + self.shared_only + else: + cflags = self.cflags + self.standalone_only + m = GnuMakefile(path) m.exe_name = exe_name m.eci = eci @@ -126,7 +139,7 @@ ('LIBS', self._libs(eci.libraries)), ('LIBDIRS', self._libdirs(eci.library_dirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), - ('CFLAGS', self.cflags), + ('CFLAGS', cflags), ('CFLAGSEXTRA', list(eci.compile_extra)), ('LDFLAGS', linkflags), ('LDFLAGSEXTRA', list(eci.link_extra)), Modified: pypy/branch/fast-forward/pypy/translator/platform/test/test_platform.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/test/test_platform.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/test/test_platform.py Thu Sep 9 01:00:13 2010 @@ -131,7 +131,7 @@ self.cc = 'xcc' x = XPlatform() res = x.key() - assert res.startswith('XPlatform cc=xcc CPATH=') + assert res.startswith("XPlatform cc='xcc' CPATH=") def test_equality(): class X(Platform): Modified: pypy/branch/fast-forward/pypy/translator/platform/windows.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/platform/windows.py (original) +++ pypy/branch/fast-forward/pypy/translator/platform/windows.py Thu Sep 9 01:00:13 2010 @@ -72,10 +72,10 @@ cc = 'cl.exe' link = 'link.exe' - cflags = ['/MD', '/O2'] - link_flags = [] - standalone_only = [] - shared_only = [] + cflags = ('/MD', '/O2') + link_flags = () + standalone_only = () + shared_only = () environ = None def __init__(self, cc=None): @@ -141,7 +141,7 @@ # Windows needs to resolve all symbols even for DLLs return super(MsvcPlatform, self)._link_args_from_eci(eci, standalone=True) - def _exportsymbols_link_flags(self, eci): + def _exportsymbols_link_flags(self, eci, relto=None): if not eci.export_symbols: return [] @@ -150,6 +150,9 @@ for sym in eci.export_symbols: f.write("/EXPORT:%s\n" % (sym,)) f.close() + + if relto: + response_file = relto.bestrelpath(response_file) return ["@%s" % (response_file,)] def 
_compile_c_file(self, cc, cfile, compile_args): @@ -215,11 +218,11 @@ m.exe_name = exe_name m.eci = eci - linkflags = self.link_flags[:] + linkflags = list(self.link_flags) if shared: linkflags = self._args_for_shared(linkflags) + [ '/EXPORT:$(PYPY_MAIN_FUNCTION)'] - linkflags += self._exportsymbols_link_flags(eci) + linkflags += self._exportsymbols_link_flags(eci, relto=path) if shared: so_name = exe_name.new(purebasename='lib' + exe_name.purebasename, @@ -333,10 +336,10 @@ class MingwPlatform(posix.BasePosix): name = 'mingw32' - standalone_only = [] - shared_only = [] - cflags = ['-O3'] - link_flags = [] + standalone_only = () + shared_only = () + cflags = ('-O3',) + link_flags = () exe_ext = 'exe' so_ext = 'dll' From fijal at codespeak.net Thu Sep 9 02:02:43 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 9 Sep 2010 02:02:43 +0200 (CEST) Subject: [pypy-svn] r76956 - pypy/branch/rsocket-improvements/pypy/jit/tool Message-ID: <20100909000243.F3A2D282BDC@codespeak.net> Author: fijal Date: Thu Sep 9 02:02:42 2010 New Revision: 76956 Modified: pypy/branch/rsocket-improvements/pypy/jit/tool/traceviewer.py Log: Update this tool (how-this-could-have-ever-worked kind of checkin) Modified: pypy/branch/rsocket-improvements/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/rsocket-improvements/pypy/jit/tool/traceviewer.py Thu Sep 9 02:02:42 2010 @@ -250,13 +250,14 @@ class Counts(dict): pass -def main(loopfile, options, view=True): +def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [line.rsplit(':', 1) for line in countname.readlines()] - counts = Counts([(k, int(v.strip('\n'))) for k, v in counts]) + counts = [re.split(r' +', line, 1) for line in countname.readlines()] + counts = Counts([(k.strip("\n"), int(v.strip('\n'))) + for v, k in counts]) l = list(sorted(counts.values())) - if len(l) > 20 and options.use_threshold: + if len(l) > 20 and use_threshold: counts.threshold = l[-20] else: counts.threshold = 0 @@ -274,7 +275,7 @@ if __name__ == '__main__': parser = optparse.OptionParser(usage=__doc__) parser.add_option('--use-threshold', dest='use_threshold', - action="store_true") + action="store_true", default=False) options, args = parser.parse_args(sys.argv) if len(args) != 2: print __doc__ From benjamin at codespeak.net Thu Sep 9 02:14:24 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Thu, 9 Sep 2010 02:14:24 +0200 (CEST) Subject: [pypy-svn] r76957 - pypy/branch/fast-forward/lib-python Message-ID: <20100909001424.8577B282BDC@codespeak.net> Author: benjamin Date: Thu Sep 9 02:14:23 2010 New Revision: 76957 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: test_profilehooks.py is no more Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Thu Sep 9 02:14:23 2010 @@ -377,7 +377,6 @@ RegrTest('test_pprint.py', core=True), RegrTest('test_print.py', core=True), RegrTest('test_profile.py'), - RegrTest('test_profilehooks.py', core=True), RegrTest('test_property.py', core=True), RegrTest('test_pstats.py'), RegrTest('test_pty.py', skip="unsupported extension module"), From benjamin at codespeak.net Thu Sep 9 03:15:57 2010 
From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Thu, 9 Sep 2010 03:15:57 +0200 (CEST) Subject: [pypy-svn] r76958 - pypy/branch/fast-forward/lib-python Message-ID: <20100909011557.945E2282BDC@codespeak.net> Author: benjamin Date: Thu Sep 9 03:15:56 2010 New Revision: 76958 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: adjust due to test changes Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Thu Sep 9 03:15:56 2010 @@ -394,6 +394,7 @@ RegrTest('test_resource.py', skip=skip_win32), RegrTest('test_rfc822.py'), RegrTest('test_richcmp.py', core=True), + RegrTest('test_rlcompleter.py'), RegrTest('test_robotparser.py'), RegrTest('test_sax.py'), @@ -440,6 +441,8 @@ RegrTest('test_symtable.py', skip="implementation detail"), RegrTest('test_syntax.py', core=True), RegrTest('test_sys.py', core=True), + RegrTest('test_sys_settrace.py', core=True), + RegrTest('test_sys_setprofile.py', core=True), RegrTest('test_sysconfig.py'), RegrTest('test_tcl.py', skip="unsupported extension module"), RegrTest('test_tarfile.py'), @@ -462,7 +465,6 @@ RegrTest('test_ttk_guionly.py'), RegrTest('test_ttk_textonly.py'), RegrTest('test_tokenize.py'), - RegrTest('test_trace.py', core=True), RegrTest('test_traceback.py', core=True), RegrTest('test_transformer.py', core=True), RegrTest('test_tuple.py', core=True), From benjamin at codespeak.net Thu Sep 9 03:17:02 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Thu, 9 Sep 2010 03:17:02 +0200 (CEST) Subject: [pypy-svn] r76959 - pypy/branch/fast-forward/pypy/module/__builtin__ Message-ID: <20100909011702.0FFA6282BDC@codespeak.net> Author: benjamin Date: Thu Sep 9 03:17:00 2010 New Revision: 76959 Modified: pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py Log: make class objects weakrefable Modified: pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/interp_classobj.py Thu Sep 9 03:17:00 2010 @@ -228,6 +228,7 @@ unwrap_spec=['self', ObjSpace, W_Root, W_Root]), __delattr__ = interp2app(W_ClassObject.descr_delattr, unwrap_spec=['self', ObjSpace, W_Root]), + __weakref__ = make_weakref_descr(W_ClassObject), ) W_ClassObject.typedef.acceptable_as_base_class = False From arigo at codespeak.net Thu Sep 9 11:36:07 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 11:36:07 +0200 (CEST) Subject: [pypy-svn] r76960 - in pypy/trunk/pypy/rpython/tool: . test Message-ID: <20100909093607.A7298282BDC@codespeak.net> Author: arigo Date: Thu Sep 9 11:36:03 2010 New Revision: 76960 Modified: pypy/trunk/pypy/rpython/tool/rffi_platform.py pypy/trunk/pypy/rpython/tool/test/test_rffi_platform.py Log: In rffi_platform, return an integer of some RPython type that fits it: either int, r_uint, r_longlong or r_ulonglong. 
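(Illustrative sketch, not part of this checkin: assuming the new expose_value_as_rpython() helper shown in the diff below and the existing module-level getconstantinteger() convenience, a constant that does not fit a signed machine word should now come back as the smallest fitting RPython integer type instead of a plain Python long. The constant name and header snippet are made up for the example.)

    from pypy.rpython.tool import rffi_platform
    from pypy.rlib.rarithmetic import r_uint
    import sys

    # hypothetical constant: 0xFFFFFFFF does not fit a signed word on a 32-bit host
    value = rffi_platform.getconstantinteger('BIGVALUE',
                                             '#define BIGVALUE 0xFFFFFFFFUL')
    if sys.maxint == 2147483647:
        # with this change the value is expected to be wrapped into r_uint,
        # the smallest RPython integer type that can hold it
        assert type(value) is r_uint
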
Modified: pypy/trunk/pypy/rpython/tool/rffi_platform.py ============================================================================== --- pypy/trunk/pypy/rpython/tool/rffi_platform.py (original) +++ pypy/trunk/pypy/rpython/tool/rffi_platform.py Thu Sep 9 11:36:03 2010 @@ -8,6 +8,7 @@ from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import CompilationError from pypy.tool.udir import udir +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong, intmask # ____________________________________________________________ # @@ -371,7 +372,7 @@ yield '}' def build_result(self, info, config_result): - return info['value'] + return expose_value_as_rpython(info['value']) class DefinedConstantInteger(CConfigEntry): """An entry in a CConfig class that stands for an externally @@ -397,7 +398,7 @@ def build_result(self, info, config_result): if info["defined"]: - return info['value'] + return expose_value_as_rpython(info['value']) return None class DefinedConstantString(CConfigEntry): @@ -620,6 +621,20 @@ raise TypeError("conflicting field type %r for %r" % (fieldtype, fieldname)) +def expose_value_as_rpython(value): + if intmask(value) == value: + return value + if r_uint(value) == value: + return r_uint(value) + try: + if r_longlong(value) == value: + return r_longlong(value) + except OverflowError: + pass + if r_ulonglong(value) == value: + return r_ulonglong(value) + raise OverflowError("value %d does not fit into any RPython integer type" + % (value,)) C_HEADER = """ #include Modified: pypy/trunk/pypy/rpython/tool/test/test_rffi_platform.py ============================================================================== --- pypy/trunk/pypy/rpython/tool/test/test_rffi_platform.py (original) +++ pypy/trunk/pypy/rpython/tool/test/test_rffi_platform.py Thu Sep 9 11:36:03 2010 @@ -5,6 +5,7 @@ from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong def import_ctypes(): try: @@ -357,3 +358,19 @@ padding = list(S._hints['padding']) d = {'c_c1': 'char'} assert S._hints['get_padding_drop'](d) == padding + +def test_expose_value_as_rpython(): + def get(x): + x = rffi_platform.expose_value_as_rpython(x) + return (x, type(x)) + assert get(5) == (5, int) + assert get(-82) == (-82, int) + assert get(sys.maxint) == (sys.maxint, int) + assert get(sys.maxint+1) == (sys.maxint+1, r_uint) + if sys.maxint == 2147483647: + assert get(9999999999) == (9999999999, r_longlong) + assert get(-9999999999) == (-9999999999, r_longlong) + assert get(2**63) == (2**63, r_ulonglong) + assert get(-2**63) == (-2**63, r_longlong) + py.test.raises(OverflowError, get, -2**63-1) + py.test.raises(OverflowError, get, 2**64) From fijal at codespeak.net Thu Sep 9 11:38:04 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 9 Sep 2010 11:38:04 +0200 (CEST) Subject: [pypy-svn] r76961 - pypy/branch/rsocket-improvements/pypy/tool/release Message-ID: <20100909093804.63269282BDC@codespeak.net> Author: fijal Date: Thu Sep 9 11:38:02 2010 New Revision: 76961 Modified: pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py Log: update builder name here Modified: pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py (original) +++ 
pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py Thu Sep 9 11:38:02 2010 @@ -23,7 +23,7 @@ 'pypy-c-app-level-linux-x86-32', 'pypy-c-app-level-linux-64', 'pypy-c-stackless-app-level-linux-x86-32', - 'pypy-c-app-level-win-32', + 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', # 'pypy-c-jit-macosx-x86-32', 'pypy-c-jit-win-x86-32', From fijal at codespeak.net Thu Sep 9 11:38:49 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 9 Sep 2010 11:38:49 +0200 (CEST) Subject: [pypy-svn] r76962 - pypy/branch/rsocket-improvements/pypy/tool/release Message-ID: <20100909093849.061B5282BDC@codespeak.net> Author: fijal Date: Thu Sep 9 11:38:47 2010 New Revision: 76962 Modified: pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py Log: update this builder name as well Modified: pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py (original) +++ pypy/branch/rsocket-improvements/pypy/tool/release/force-builds.py Thu Sep 9 11:38:47 2010 @@ -21,7 +21,7 @@ 'own-linux-x86-64', # 'own-macosx-x86-32', 'pypy-c-app-level-linux-x86-32', - 'pypy-c-app-level-linux-64', + 'pypy-c-app-level-linux-x86-64', 'pypy-c-stackless-app-level-linux-x86-32', 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', From arigo at codespeak.net Thu Sep 9 12:07:42 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 12:07:42 +0200 (CEST) Subject: [pypy-svn] r76963 - pypy/branch/jit-generator/pypy/jit/metainterp Message-ID: <20100909100742.17213282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 12:07:41 2010 New Revision: 76963 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py Log: Avoids one extra copy. 
Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/compile.py Thu Sep 9 12:07:41 2010 @@ -57,12 +57,9 @@ loop.inputargs = history.inputargs for box in loop.inputargs: assert isinstance(box, Box) - if start > 0: - ops = history.operations[start:] - else: - ops = history.operations # make a copy, because optimize_loop can mutate the ops and descrs - loop.operations = [op.clone() for op in ops] + h_ops = history.operations + loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) From fijal at codespeak.net Thu Sep 9 12:12:14 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 9 Sep 2010 12:12:14 +0200 (CEST) Subject: [pypy-svn] r76964 - pypy/branch/rsocket-improvements/pypy/rlib Message-ID: <20100909101214.460AD282B9C@codespeak.net> Author: fijal Date: Thu Sep 9 12:12:12 2010 New Revision: 76964 Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Log: Make PacketAddress optional Modified: pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/rsocket.py Thu Sep 9 12:12:12 2010 @@ -199,64 +199,65 @@ # ____________________________________________________________ -class PacketAddress(Address): - family = AF_PACKET - struct = _c.sockaddr_ll - maxlen = minlen = sizeof(struct) +if 'AF_PACKET' in constants: + class PacketAddress(Address): + family = AF_PACKET + struct = _c.sockaddr_ll + maxlen = minlen = sizeof(struct) - def get_ifname(self, fd): - a = self.lock(_c.sockaddr_ll) - p = lltype.malloc(_c.ifreq, flavor='raw') - rffi.setintfield(p, 'c_ifr_ifindex', - rffi.getintfield(a, 'c_sll_ifindex')) - if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): - # eh, the iface name is a constant length array - i = 0 - d = [] - while p.c_ifr_name[i] != '\x00' and i < len(p.c_ifr_name): - d.append(p.c_ifr_name[i]) - i += 1 - ifname = ''.join(d) - else: - ifname = "" - lltype.free(p, flavor='raw') - self.unlock() - return ifname + def get_ifname(self, fd): + a = self.lock(_c.sockaddr_ll) + p = lltype.malloc(_c.ifreq, flavor='raw') + rffi.setintfield(p, 'c_ifr_ifindex', + rffi.getintfield(a, 'c_sll_ifindex')) + if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): + # eh, the iface name is a constant length array + i = 0 + d = [] + while p.c_ifr_name[i] != '\x00' and i < len(p.c_ifr_name): + d.append(p.c_ifr_name[i]) + i += 1 + ifname = ''.join(d) + else: + ifname = "" + lltype.free(p, flavor='raw') + self.unlock() + return ifname - def get_protocol(self): - a = self.lock(_c.sockaddr_ll) - res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) - self.unlock() - return res + def get_protocol(self): + a = self.lock(_c.sockaddr_ll) + res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) + self.unlock() + return res - def get_pkttype(self): - a = self.lock(_c.sockaddr_ll) - res = rffi.getintfield(a, 'c_sll_pkttype') - self.unlock() - return res + def get_pkttype(self): + a = self.lock(_c.sockaddr_ll) + res = rffi.getintfield(a, 'c_sll_pkttype') + self.unlock() + return res - def get_hatype(self): - a = self.lock(_c.sockaddr_ll) - res = 
bool(rffi.getintfield(a, 'c_sll_hatype')) - self.unlock() - return res - - def get_addr(self): - a = self.lock(_c.sockaddr_ll) - lgt = rffi.getintfield(a, 'c_sll_halen') - d = [] - for i in range(lgt): - d.append(a.c_sll_addr[i]) - res = "".join(d) - self.unlock() - return res + def get_hatype(self): + a = self.lock(_c.sockaddr_ll) + res = bool(rffi.getintfield(a, 'c_sll_hatype')) + self.unlock() + return res - def as_object(self, fd, space): - return space.newtuple([space.wrap(self.get_ifname(fd)), - space.wrap(self.get_protocol()), - space.wrap(self.get_pkttype()), - space.wrap(self.get_hatype()), - space.wrap(self.get_addr())]) + def get_addr(self): + a = self.lock(_c.sockaddr_ll) + lgt = rffi.getintfield(a, 'c_sll_halen') + d = [] + for i in range(lgt): + d.append(a.c_sll_addr[i]) + res = "".join(d) + self.unlock() + return res + + def as_object(self, fd, space): + return space.newtuple([space.wrap(self.get_ifname(fd)), + space.wrap(self.get_protocol()), + space.wrap(self.get_pkttype()), + space.wrap(self.get_hatype()), + space.wrap(self.get_addr())]) class INETAddress(IPAddress): family = AF_INET From dan at codespeak.net Thu Sep 9 12:21:54 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Thu, 9 Sep 2010 12:21:54 +0200 (CEST) Subject: [pypy-svn] r76966 - in pypy/branch/micronumpy/pypy/module: micronumpy micronumpy/test posix Message-ID: <20100909102154.E541B282B9C@codespeak.net> Author: dan Date: Thu Sep 9 12:21:53 2010 New Revision: 76966 Modified: pypy/branch/micronumpy/pypy/module/micronumpy/array.py pypy/branch/micronumpy/pypy/module/micronumpy/dtype.py pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py pypy/branch/micronumpy/pypy/module/posix/interp_posix.py Log: Cache offset into array data, passing way more tests. 
Modified: pypy/branch/micronumpy/pypy/module/micronumpy/array.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/array.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/array.py Thu Sep 9 12:21:53 2010 @@ -15,6 +15,7 @@ return stride def stride_column(shape, i): + assert i >= 0 i -= 1 stride = 1 while i >= 0: @@ -24,17 +25,18 @@ def size_from_shape(shape): size = 1 - for dimension in shape: - size *= dimension - return size + if len(shape) > 0: + for dimension in shape: + size *= dimension + return size + else: + return 0 def normalize_slice_starts(slice_starts, shape): for i in range(len(slice_starts)): - #print "slice_start[%d]=%d" % (i, slice_starts[i]) if slice_starts[i] < 0: slice_starts[i] += shape[i] elif slice_starts[i] >= shape[i]: - print "raising" raise IndexError("invalid index") return slice_starts @@ -100,6 +102,13 @@ space.wrap("invalid index")) # all as in numpy def infer_shape(space, w_values): + try: + values = space.str_w(w_values) + return [len(values)] + except OperationError, e: + if e.match(space, space.w_TypeError): pass + else: raise + shape = [] while True: try: Modified: pypy/branch/micronumpy/pypy/module/micronumpy/dtype.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/dtype.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/dtype.py Thu Sep 9 12:21:53 2010 @@ -90,8 +90,9 @@ def cast(self, data): return rffi.cast(lltype.Ptr(arraytype), data) - def dump(self, data): - return ', '.join([str(x) for x in self.cast(data)]) + def dump(self, data, count): + data = self.cast(data) + return ', '.join([str(data[i]) for i in range(count)]) def typestr(self): if self is float_descr: @@ -134,15 +135,19 @@ # i is ?? 
int_descr = descriptor('i', 'int32', lltype.Signed) -type(int_descr).unwrap = lambda self, space, value: space.int_w(value) -type(int_descr).coerce_w = lambda self, space, value: space.int_w(space.int(value)) +IntDescrImpl = type(int_descr) +IntDescrImpl.unwrap = lambda self, space, value: space.int_w(value) +IntDescrImpl.coerce = lambda self, space, value: space.int(value) +IntDescrImpl.coerce_w = lambda self, space, value: space.int_w(space.int(value)) _int_index = _typeindex['i'] _typestring['int32'] = _int_index w_int_descr = _w_descriptors[_int_index] float_descr = descriptor('d', 'float64', lltype.Float) -type(float_descr).unwrap = lambda self, space, value: space.float_w(value) -type(float_descr).coerce_w = lambda self, space, value: space.float_w(space.float(value)) +FloatDescrImpl = type(float_descr) +FloatDescrImpl.unwrap = lambda self, space, value: space.float_w(value) +FloatDescrImpl.coerce = lambda self, space, value: space.float(value) +FloatDescrImpl.coerce_w = lambda self, space, value: space.float_w(space.float(value)) _float_index = _typeindex['d'] _typestring['float64'] = _float_index w_float_descr = _w_descriptors[_float_index] Modified: pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py Thu Sep 9 12:21:53 2010 @@ -21,54 +21,38 @@ from pypy.rpython.lltypesystem.lltype import cast_ptr_to_int -class FlatIter(Wrappable): - _immutable_fields_ = ['array', 'stop'] - def __init__(self, array): - self.array = array - self.i = 0 - self.stop = size_from_shape(array.shape) - - def descr_iter(self, space): - return space.wrap(self) - descr_iter.unwrap_spec = ['self', ObjSpace] - - def descr_next(self, space): - return self.array.getitem(space, self.i) # FIXME - descr_iter.unwrap_spec = ['self', ObjSpace] - class MicroIter(Wrappable): - _immutable_fields_ = ['array', 'step', 'stop', 'ndim'] + _immutable_fields_ = ['array', 'offset', 'step', 'shape', 'ndim'] def __init__(self, array): self.array = array self.i = 0 - self.index = array.slice_starts[:] - self.step = array.slice_steps[array.prefix] - self.stop = array.shape[array.prefix] - self.ndim = len(array.shape) - array.prefix + self.step = array.slice_steps[0] + self.shape = array.shape[0] + self.stride = array.strides[0] + self.ndim = len(array.shape) + self.offset = 0 def descr_iter(self, space): return space.wrap(self) descr_iter.unwrap_spec = ['self', ObjSpace] def descr_next(self, space): - if self.i < self.stop: - print self.index + if self.i < self.shape: if self.ndim > 1: ar = MicroArray(self.array.shape, self.array.dtype, parent=self.array, - offset=self.array.prefix + 1, - strides=self.array.strides, - slice_starts=self.index, - slice_steps=self.array.slice_steps) + offset=self.offset + self.array.offset, + strides=self.array.strides[1:], + slice_steps=self.array.slice_steps[1:]) next = space.wrap(ar) elif self.ndim == 1: - next = self.array.getitem(space, self.array.flatten_slice_starts(self.index)) + next = self.array.getitem(space, self.offset) else: raise OperationError(space.w_ValueError, space.wrap("Something is horribly wrong with this array's shape. Has %d dimensions." 
% len(self.array.shape))) self.i += 1 - self.index[self.array.prefix] += self.step + self.offset += self.step * self.stride return next else: raise OperationError(space.w_StopIteration, space.wrap("")) @@ -81,11 +65,10 @@ class MicroArray(BaseNumArray): - _immutable_fields_ = ['shape', 'strides', 'offset', 'slice_starts'] # XXX: removed parent + _immutable_fields_ = ['shape', 'parent', 'strides', 'offset', 'slice_starts'] def __init__(self, shape, dtype, - order='C', strides=[], parent=None, - prefix=0, offset=0, - slice_starts=[], slice_steps=[]): + order='C', strides=None, parent=None, + offset=0, slice_steps=None): assert dtype is not None self.shape = shape @@ -93,19 +76,22 @@ self.parent = parent self.order = order self.offset = offset - self.prefix = prefix - self.slice_starts = slice_starts[:] - for i in range(len(slice_starts), len(shape)): - self.slice_starts.append(0) + if slice_steps is not None: + self.slice_steps = slice_steps + else: + self.slice_steps = [] - self.slice_steps = slice_steps[:] - for i in range(len(slice_steps), len(shape)): + for i in range(len(self.slice_steps), len(shape)): self.slice_steps.append(1) size = size_from_shape(shape) - self.strides = strides[:] + if strides is not None: + self.strides = strides + else: + self.strides = [] + for i in range(len(self.strides), len(shape)): self.strides.append(self.stride(i)) @@ -117,14 +103,14 @@ self.data = null_data def descr_len(self, space): - return space.wrap(self.shape[self.prefix]) + return space.wrap(self.shape[0]) descr_len.unwrap_spec = ['self', ObjSpace] def getitem(self, space, offset): """Helper function. Grabs a value at an offset into the data.""" try: - return self.dtype.dtype.w_getitem(space, self.data, offset) + return self.dtype.dtype.w_getitem(space, self.data, self.offset + offset) except IndexError, e: raise OperationError(space.w_IndexError, space.wrap("index out of bounds")) @@ -133,31 +119,11 @@ """Helper function. Sets a value at an offset in the data.""" try: - self.dtype.dtype.w_setitem(space, self.data, offset, w_value) + self.dtype.dtype.w_setitem(space, self.data, self.offset + offset, w_value) except IndexError, e: raise OperationError(space.w_IndexError, space.wrap("index out of bounds")) - def flatten_slice_starts(self, slice_starts): - """Computes offset into subarray from all information. - Gives offset into subarray, not into data.""" - offset = 0 - for i in range(len(slice_starts)): - offset += slice_starts[i] * self.strides[i] - #print offset - return offset - - def flatten_slice_starts2(self, slice_starts): - """Computes offset into subarray from all information. - Gives offset into subarray, not into data.""" - offset = 0 - for i in range(len(slice_starts)): - offset += (self.slice_steps[i] * slice_starts[i]) * self.strides[i] - print offset - return offset - - flatten_index = flatten_slice_starts2 # TODO: migrate to slice_starts for name? 
- def stride(self, i): if self.order == 'C': return stride_row(self.shape, i) # row order for C @@ -169,161 +135,161 @@ def index2slices(self, space, w_index): dtype = self.dtype.dtype - slice_starts = self.slice_starts[:] - shape = self.shape[:] - slice_steps = self.slice_steps[:] + offset = 0 try: index = space.int_w(space.index(w_index)) - # Normalize if index < 0: - index += self.shape[self.prefix] - elif index > self.shape[self.prefix]: + index += self.shape[0] + elif index >= self.shape[0]: raise OperationError(space.w_IndexError, - space.wrap("invalid index")) # FIXME: message + space.wrap("index out of bounds")) - slice_starts[self.prefix] += index * self.slice_steps[self.prefix] - shape[self.prefix] = 1 #SQUEEZE_ME - return slice_starts, shape, slice_steps + offset = index * self.slice_steps[0] * self.strides[0] + return offset, self.shape[1:], self.slice_steps[1:], self.strides[1:] except OperationError, e: if e.match(space, space.w_TypeError): pass else:raise if isinstance(w_index, W_SliceObject): start, stop, step, length = w_index.indices4(space, self.shape[0]) - slice_starts[self.prefix] += start * slice_steps[self.prefix] - shape[self.prefix] = length - slice_steps[self.prefix] *= step - return slice_starts, shape, slice_steps + offset = start * self.slice_steps[0] * self.strides[0] + + shape = self.shape[:] + slice_steps = self.slice_steps[:] + shape[0] = length + slice_steps[0] *= step + return offset, shape, slice_steps, self.strides[:] elif space.is_w(w_index, space.w_Ellipsis): - return slice_starts, shape, slice_steps + return 0, self.shape[:], self.slice_steps[:], self.strides[:] - try: - indices = space.fixedview(w_index) + indices = space.fixedview(w_index) + + ndim = len(self.shape) - indexlen = len(indices) - if indexlen != len(self.shape): # FIXME: shape will often be larger... 
- raise OperationError(space.w_IndexError, - space.wrap("invalid index")) # FIXME: message + indexlen = len(indices) + if indexlen > ndim: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) - for i in range(indexlen): - w_index = indices[i] - try: - index = space.int_w(space.index(w_index)) - slice_starts[self.prefix + i] += index - shape[self.prefix + i] = 1 #SQUEEZE_ME - continue - - except OperationError, e: - if e.match(space, space.w_TypeError): pass - else: raise - - if isinstance(w_index, W_SliceObject): - start, stop, step, length = w_index.indices4(space, self.shape[i]) - slice_starts[self.prefix + i] += start * slice_steps[self.prefix + i] - shape[self.prefix + i] = length - slice_steps[self.prefix + i] *= step - elif space.is_w(w_index, space.w_Ellipsis): - pass # I can't think of anything we need to do - else: - index = space.str(w_index) - raise OperationError(space.w_ValueError, - space.wrap("Don't support records," - " so pretend we don't have this field")) - raise OperationError(space.w_NotImplementedError, # this is the correct error - space.wrap("Don't support records yet.")) # for what we actually do + shape = [0] * ndim + strides = [0] * ndim + slice_steps = [0] * ndim + + resdim = 0 + for i in range(indexlen): + w_index = indices[i] try: - normalize_slice_starts(slice_starts, self.shape) # XXX: in-place operation - except IndexError, e: - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - finally: pass + index = space.int_w(space.index(w_index)) + if index < 0: + index += self.shape[i] + elif index >= self.shape[i]: + raise OperationError(space.w_IndexError, + space.wrap("index out of bounds")) + offset += index * self.slice_steps[i] * self.strides[i] + continue + + except OperationError, e: + if e.match(space, space.w_TypeError): pass + else: raise + + if isinstance(w_index, W_SliceObject): + start, stop, step, length = w_index.indices4(space, self.shape[i]) + offset += start * self.slice_steps[i] * self.strides[i] + shape[resdim] = length + slice_steps[resdim] = self.slice_steps[i] * step + resdim += 1 + elif space.is_w(w_index, space.w_Ellipsis): + shape[resdim] = self.shape[i] + slice_steps[resdim] = self.slice_steps[i] + strides[resdim] = self.strides[i] + resdim += 1 + else: + index = space.str(w_index) + raise OperationError(space.w_ValueError, + space.wrap("Don't support records," + " so pretend we don't have this field")) + raise OperationError(space.w_NotImplementedError, # this is the correct error + space.wrap("Don't support records yet.")) # for what we actually do - return slice_starts, shape, slice_steps + return offset, shape[:resdim], slice_steps[:resdim], strides[:resdim] def descr_getitem(self, space, w_index): - slice_starts, shape, slice_steps = self.index2slices(space, w_index) + offset, shape, slice_steps, strides = self.index2slices(space, w_index) size = size_from_shape(shape) - if size == 1: - return self.getitem(space, - self.flatten_slice_starts(slice_starts)) + if size == 0: + return self.getitem(space, offset) else: - prefix = shape_prefix(shape) ar = MicroArray(shape, dtype=self.dtype, parent=self, - offset=prefix, # XXX: what do we do about shapes that needs to be squeezed out? 
- strides=self.strides[:], - slice_starts=slice_starts, + offset=self.offset + offset, + strides=strides, slice_steps=slice_steps) return space.wrap(ar) descr_getitem.unwrap_spec = ['self', ObjSpace, W_Root] - def set_slice_single_value(self, space, slice_starts, shape, slice_steps, w_value): - index = slice_starts[:] + def set_slice_single_value(self, space, offset, shape, slice_steps, strides, w_value): if len(shape) > 1: for i in range(shape[0]): - self.set_slice_single_value(space, index, shape[1:], slice_steps[1:], w_value) - index[len(index) - len(shape)] += slice_steps[0] + self.set_slice_single_value(space, offset, shape[1:], slice_steps[1:], strides[1:], w_value) + offset += slice_steps[0] * strides[0] else: for i in range(shape[0]): - self.setitem(space, self.flatten_index(index), w_value) - index[len(index)-1] += slice_steps[0] + self.setitem(space, offset, w_value) + offset += slice_steps[0] * strides[0] - def set_slice(self, space, slice_starts, shape, slice_steps, w_value): + def set_slice(self, space, offset, shape, slice_steps, strides, w_value): try: length = space.int_w(space.len(w_value)) if length == 1: - self.set_slice_single_value(space, slice_starts, shape, slice_steps, w_value) + w_value = space.getitem(w_value, space.wrap(0)) + self.set_slice_single_value(space, offset, shape, slice_steps, strides, w_value) else: raise OperationError(space.w_NotImplementedError, # XXX: TODO space.wrap("TODO")) except OperationError, e: if e.match(space, space.w_TypeError): - self.set_slice_single_value(space, slice_starts, shape, slice_steps, w_value) + self.set_slice_single_value(space, offset, shape, slice_steps, strides, w_value) else: raise def descr_setitem(self, space, w_index, w_value): dtype = self.dtype.dtype - slice_starts, shape, slice_steps = self.index2slices(space, w_index) + offset, shape, slice_steps, strides = self.index2slices(space, w_index) size = size_from_shape(shape) try: - if space.int_w(space.len(w_value)) == 1: - w_value = space.getitem(w_value, space.wrap(0)) + # XXX: if size is 0 we shouldn't really infer value_shape = infer_shape(space, w_value) value_size = size_from_shape(value_shape) - except OperationError, e: if e.match(space, space.w_TypeError): - value_size = 1 - value_shape = [1] + value_shape = [] + value_size = 0 else: raise - if squeeze_shape(value_shape) != squeeze_shape(shape): - raise OperationError(space.w_ValueError, - space.wrap("shape mismatch: objects cannot" - " be broadcast to a single shape")) - - if size == 1: - self.setitem(space, - self.flatten_index(slice_starts), - w_value) + if size == 0: + if len(value_shape) > 0: + raise OperationError(space.w_ValueError, + space.wrap("shape mismatch: objects cannot" + " be broadcast to a single shape")) + + self.setitem(space, offset, self.dtype.dtype.coerce(space, w_value)) else: - if value_size == 1: - self.set_slice_single_value(space, - slice_starts, shape, slice_steps, - w_value) - else: - self.set_slice(space, - slice_starts, shape, slice_steps, - w_value) + if squeeze_shape(value_shape) != squeeze_shape(shape): + raise OperationError(space.w_ValueError, + space.wrap("shape mismatch: objects cannot" + " be broadcast to a single shape")) + + self.set_slice(space, + offset, shape, slice_steps, strides, + w_value) descr_setitem.unwrap_spec = ['self', ObjSpace, W_Root, W_Root] def descr_repr(self, space): @@ -410,7 +376,7 @@ return space.wrap(self.dtype) def descr_get_shape(space, self): - return space.newtuple([space.wrap(x) for x in self.shape[self.prefix:]]) + return 
space.newtuple([space.wrap(x) for x in self.shape]) def descr_get_array_interface(space, self): w_dict = space.newdict() Modified: pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/test/test_numpy.py Thu Sep 9 12:21:53 2010 @@ -426,6 +426,7 @@ assert typecode == infer_from_iterable(space, w_xs).typecode class TestMicroArray(object): + @py.test.mark.xfail # XXX: return types changed def test_index2strides(self, space): from pypy.module.micronumpy.microarray import MicroArray from pypy.module.micronumpy.dtype import w_int_descr @@ -473,6 +474,7 @@ offset = squeeze(slice_starts, shape, slice_steps, strides) + @py.test.mark.xfail # XXX: arguments changed def test_slice_setting(self, space): from pypy.module.micronumpy.array import size_from_shape from pypy.module.micronumpy.microarray import MicroArray @@ -521,25 +523,6 @@ assert row_strides(shape) == [30, 6, 2, 1] assert column_strides(shape) == [1, 7, 35, 105] - def test_flatten_index(self, space): - from pypy.module.micronumpy.microarray import MicroArray - from pypy.module.micronumpy.dtype import w_int_descr - - ar = MicroArray(shape=[7, 5, 3, 2], - dtype=w_int_descr) - - offset = ar.flatten_index([0, 0, 0, 0]) - assert offset == 0 - - offset = ar.flatten_index([0, 0, 0, 1]) - assert offset == 1 - - offset = ar.flatten_index([0, 0, 1, 1]) - assert offset == 3 - - offset = ar.flatten_index([0, 2, 0, 1]) - assert offset == 13 - def test_memory_layout(self, space): from pypy.module.micronumpy.microarray import MicroArray from pypy.module.micronumpy.microarray import array @@ -556,11 +539,12 @@ memlen = len(column_major) ar = array(space, w_data, w_dtype=w_int_descr, order='C') #C for C not column + for i in range(memlen): array_element = space.unwrap(ar.getitem(space, i)) # ugly, but encapsulates everything - assert array_element == row_major[i], "Array Data: %r, Array Index: %d (%s != %s)" % (ar.dtype.dtype.dump(ar.data), i, array_element, row_major[i]) + assert array_element == row_major[i], "Array Data: %r, Array Index: %d (%s != %s)" % (ar.dtype.dtype.dump(ar.data, 6), i, array_element, row_major[i]) ar = array(space, w_data, w_dtype=w_int_descr, order='F') for i in range(memlen): array_element = space.unwrap(ar.getitem(space, i)) # ugly, but encapsulates everything - assert array_element == column_major[i], "Array Data: %r, Array Index: %d (%s != %s)" % (ar.dtype.dtype.dump(ar.data), i, array_element, row_major[i]) + assert array_element == column_major[i], "Array Data: %r, Array Index: %d (%s != %s)" % (ar.dtype.dtype.dump(ar.data, 6), i, array_element, row_major[i]) Modified: pypy/branch/micronumpy/pypy/module/posix/interp_posix.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/posix/interp_posix.py (original) +++ pypy/branch/micronumpy/pypy/module/posix/interp_posix.py Thu Sep 9 12:21:53 2010 @@ -6,8 +6,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 from pypy.rpython.module.ll_os import RegisterOs from pypy.rpython.module import ll_os_stat -from pypy.rpython.lltypesystem import rffi, lltype -from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import os, sys @@ -449,6 +447,7 @@ class State: + from pypy.rpython.lltypesystem import rffi, lltype def 
__init__(self, space): self.space = space self.w_environ = space.newdict() @@ -956,6 +955,8 @@ if _WIN: from pypy.rlib import rwin32 + from pypy.rpython.lltypesystem import rffi, lltype + from pypy.rpython.tool import rffi_platform eci = ExternalCompilationInfo( includes = ['windows.h', 'wincrypt.h'], From arigo at codespeak.net Thu Sep 9 14:52:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 14:52:19 +0200 (CEST) Subject: [pypy-svn] r76967 - in pypy/trunk/pypy/jit: backend/test backend/x86 metainterp Message-ID: <20100909125219.146E0282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 14:52:16 2010 New Revision: 76967 Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py pypy/trunk/pypy/jit/backend/x86/regalloc.py pypy/trunk/pypy/jit/metainterp/jitdriver.py pypy/trunk/pypy/jit/metainterp/warmspot.py Log: The portal_calldescr should not be attached to the CPU, but to the JitDriverSD. (almost not used any more but still) Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/runner_test.py (original) +++ pypy/trunk/pypy/jit/backend/test/runner_test.py Thu Sep 9 14:52:16 2010 @@ -1777,7 +1777,7 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -1816,7 +1816,7 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) ops = ''' Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Thu Sep 9 14:52:16 2010 @@ -665,13 +665,11 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - portal_calldescr = self.assembler.cpu.portal_calldescr - size = portal_calldescr.get_result_size(self.translate_support_code) - # descr = op.descr assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None + size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.args[vable_index]) Modified: pypy/trunk/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/trunk/pypy/jit/metainterp/jitdriver.py Thu Sep 9 14:52:16 2010 @@ -8,6 +8,7 @@ # self.portal_graph ... pypy.jit.metainterp.warmspot # self.portal_runner_ptr ... pypy.jit.metainterp.warmspot # self.portal_runner_adr ... pypy.jit.metainterp.warmspot + # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... 
pypy.jit.metainterp.warmspot Modified: pypy/trunk/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/warmspot.py (original) +++ pypy/trunk/pypy/jit/metainterp/warmspot.py Thu Sep 9 14:52:16 2010 @@ -648,7 +648,7 @@ jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE, ll_portal_runner) jd.portal_runner_adr = llmemory.cast_ptr_to_adr(jd.portal_runner_ptr) - self.cpu.portal_calldescr = self.cpu.calldescrof( + jd.portal_calldescr = self.cpu.calldescrof( jd._PTR_PORTAL_FUNCTYPE.TO, jd._PTR_PORTAL_FUNCTYPE.TO.ARGS, jd._PTR_PORTAL_FUNCTYPE.TO.RESULT) From arigo at codespeak.net Thu Sep 9 14:53:30 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 14:53:30 +0200 (CEST) Subject: [pypy-svn] r76968 - in pypy/branch/jit-generator/pypy/jit: backend/test backend/x86 metainterp Message-ID: <20100909125330.8C405282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 14:53:29 2010 New Revision: 76968 Modified: pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py pypy/branch/jit-generator/pypy/jit/backend/x86/regalloc.py pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Log: Merge r76967 into this branch. Modified: pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py Thu Sep 9 14:53:29 2010 @@ -1777,7 +1777,7 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -1816,7 +1816,7 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) ops = ''' Modified: pypy/branch/jit-generator/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/x86/regalloc.py Thu Sep 9 14:53:29 2010 @@ -665,13 +665,11 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - portal_calldescr = self.assembler.cpu.portal_calldescr - size = portal_calldescr.get_result_size(self.translate_support_code) - # descr = op.descr assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None + size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.args[vable_index]) Modified: pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py Thu Sep 9 14:53:29 2010 @@ -8,6 +8,7 @@ # self.portal_graph ... pypy.jit.metainterp.warmspot # self.portal_runner_ptr ... pypy.jit.metainterp.warmspot # self.portal_runner_adr ... 
pypy.jit.metainterp.warmspot + # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Thu Sep 9 14:53:29 2010 @@ -650,7 +650,7 @@ jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE, ll_portal_runner) jd.portal_runner_adr = llmemory.cast_ptr_to_adr(jd.portal_runner_ptr) - self.cpu.portal_calldescr = self.cpu.calldescrof( + jd.portal_calldescr = self.cpu.calldescrof( jd._PTR_PORTAL_FUNCTYPE.TO, jd._PTR_PORTAL_FUNCTYPE.TO.ARGS, jd._PTR_PORTAL_FUNCTYPE.TO.RESULT) From arigo at codespeak.net Thu Sep 9 16:26:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 16:26:19 +0200 (CEST) Subject: [pypy-svn] r76970 - pypy/branch/saner-guard-exc Message-ID: <20100909142619.BACF6282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 16:26:17 2010 New Revision: 76970 Added: pypy/branch/saner-guard-exc/ - copied from r76969, pypy/trunk/ Log: A branch in which to simplify the GUARD_(NO)_EXCEPTION operations. From arigo at codespeak.net Thu Sep 9 16:29:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 16:29:24 +0200 (CEST) Subject: [pypy-svn] r76971 - pypy/branch/saner-guard-exc/pypy/jit/metainterp Message-ID: <20100909142924.D51DD282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 16:29:23 2010 New Revision: 76971 Modified: pypy/branch/saner-guard-exc/pypy/jit/metainterp/resoperation.py Log: The goal is to do these changes to resoperation.py. 
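Roughly, the idea is that exception checking stops being a pair of guards and becomes two ordinary operations, LAST_EXC and CLEAR_EXC, with a plain guard on the result; the trace shape shows up in the backend test updated in r76972 below. A small self-contained sketch of the intended semantics, with made-up names standing in for the real backend state:

    class FakeBackendState(object):
        # sketch only: _last_exception stands in for the pending-exception
        # slot that a residual CALL fills in when the callee raises
        def __init__(self):
            self._last_exception = None

        def op_last_exc(self):
            # LAST_EXC: fetch the exception set by the last call, or None
            return self._last_exception

        def op_clear_exc(self):
            # CLEAR_EXC: reset the pending-exception slot
            self._last_exception = None

    # informal reading of the trace used in the updated runner test:
    #     call(...)            # may set the pending exception
    #     p0 = last_exc()      # p0 = state.op_last_exc()
    #     clear_exc()          # state.op_clear_exc()
    #     finish(p0)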
Modified: pypy/branch/saner-guard-exc/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/metainterp/resoperation.py Thu Sep 9 16:29:23 2010 @@ -74,10 +74,6 @@ def is_foldable_guard(self): return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST - def is_guard_exception(self): - return (self.opnum == rop.GUARD_EXCEPTION or - self.opnum == rop.GUARD_NO_EXCEPTION) - def is_guard_overflow(self): return (self.opnum == rop.GUARD_OVERFLOW or self.opnum == rop.GUARD_NO_OVERFLOW) @@ -126,8 +122,6 @@ 'GUARD_ISNULL', 'GUARD_NONNULL_CLASS', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION', - 'GUARD_EXCEPTION', 'GUARD_NO_OVERFLOW', 'GUARD_OVERFLOW', 'GUARD_NOT_FORCED', @@ -207,6 +201,7 @@ 'NEW_ARRAY/1d', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend + 'LAST_EXC/0', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', @@ -222,6 +217,7 @@ 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend + 'CLEAR_EXC/0', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- 'CALL', From arigo at codespeak.net Thu Sep 9 16:45:32 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 16:45:32 +0200 (CEST) Subject: [pypy-svn] r76972 - in pypy/branch/saner-guard-exc/pypy/jit: backend backend/llgraph backend/test metainterp metainterp/test Message-ID: <20100909144532.40DF0282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 16:45:29 2010 New Revision: 76972 Modified: pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/llimpl.py pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/runner.py pypy/branch/saner-guard-exc/pypy/jit/backend/model.py pypy/branch/saner-guard-exc/pypy/jit/backend/test/runner_test.py pypy/branch/saner-guard-exc/pypy/jit/metainterp/executor.py pypy/branch/saner-guard-exc/pypy/jit/metainterp/test/test_basic.py Log: Mostly killing code around. Modified: pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/llimpl.py Thu Sep 9 16:45:29 2010 @@ -136,8 +136,6 @@ 'guard_false' : (('bool',), None), 'guard_value' : (('int', 'int'), None), 'guard_class' : (('ref', 'ref'), None), - 'guard_no_exception' : ((), None), - 'guard_exception' : (('ref',), 'ref'), 'guard_no_overflow' : ((), None), 'guard_overflow' : ((), None), 'guard_nonnull' : (('ref',), None), @@ -598,39 +596,12 @@ if value != expected_value: raise GuardFailed - def op_guard_no_exception(self, _): - if _last_exception: - raise GuardFailed - - def _check_exception(self, expected_exception): - global _last_exception - expected_exception = self._cast_exception(expected_exception) - assert expected_exception - exc = _last_exception - if exc: - got = exc.args[0] - # exact match! 
- if got != expected_exception: - return False - return True - else: - return False - - def _cast_exception(self, exception): - return llmemory.cast_adr_to_ptr( - llmemory.cast_int_to_adr(exception), - rclass.CLASSTYPE) - - def _issubclass(self, cls1, cls2): - return rclass.ll_issubclass(cls1, cls2) + def op_last_exc(self, _): + return grab_exc_value() - def op_guard_exception(self, _, expected_exception): + def op_clear_exc(self, _): global _last_exception - if not self._check_exception(expected_exception): - raise GuardFailed - res = _last_exception[1] _last_exception = None - return res def op_guard_no_overflow(self, _): flag = self.overflow_flag @@ -978,12 +949,6 @@ cls2 = ootype.cast_from_object(ootype.Class, obj2) return ootype.subclassof(cls1, cls2) - def _cast_exception(self, exception): - return ootype.cast_from_object(ootype.Class, exception) - - def _issubclass(self, cls1, cls2): - return ootype.subclassof(cls1, cls2) - # ____________________________________________________________ def cast_to_int(x): @@ -1558,7 +1523,6 @@ setannotation(frame_get_value_count, annmodel.SomeInteger()) setannotation(frame_clear_latest_values, annmodel.s_None) -setannotation(grab_exc_value, annmodel.SomePtr(llmemory.GCREF)) setannotation(force, annmodel.SomeInteger()) setannotation(get_forced_token_frame, s_Frame) setannotation(get_frame_forced_token, annmodel.SomeAddress()) Modified: pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/backend/llgraph/runner.py Thu Sep 9 16:45:29 2010 @@ -290,9 +290,6 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def grab_exc_value(self): - return llimpl.grab_exc_value() - def arraydescrof(self, A): assert A.OF != lltype.Void size = symbolic.get_size(A) Modified: pypy/branch/saner-guard-exc/pypy/jit/backend/model.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/backend/model.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/backend/model.py Thu Sep 9 16:45:29 2010 @@ -101,12 +101,6 @@ values -- normally get_latest_value_count().""" raise NotImplementedError - def grab_exc_value(self): - """Return and clear the exception set by the latest execute_token(), - when it exits due to a failure of a GUARD_EXCEPTION or - GUARD_NO_EXCEPTION. 
(Returns a GCREF)""" # XXX remove me - raise NotImplementedError - @staticmethod def sizeof(S): raise NotImplementedError Modified: pypy/branch/saner-guard-exc/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/backend/test/runner_test.py Thu Sep 9 16:45:29 2010 @@ -377,8 +377,6 @@ looptoken = LoopToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: - excvalue = self.cpu.grab_exc_value() - assert not excvalue self.cpu.set_future_value_int(0, x) self.cpu.set_future_value_int(1, y) fail = self.cpu.execute_token(looptoken) @@ -388,8 +386,6 @@ assert fail.identifier == 2 if z != boom: assert self.cpu.get_latest_value_int(0) == z - excvalue = self.cpu.grab_exc_value() - assert not excvalue def test_ovf_operations_reversed(self): self.test_ovf_operations(reversed=True) @@ -1330,8 +1326,9 @@ [i0] i1 = same_as(1) call(ConstClass(fptr), i0, descr=calldescr) - p0 = guard_exception(ConstClass(xtp)) [i1] - finish(0, p0) + p0 = last_exc() + clear_exc() + finish(p0) ''' FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) fptr = llhelper(FPTR, func) @@ -1344,64 +1341,19 @@ hints={'vtable': xtp._obj}) xptr = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(X)) - exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) self.cpu.set_future_value_int(0, 1) self.cpu.execute_token(loop.token) - assert self.cpu.get_latest_value_int(0) == 0 - assert self.cpu.get_latest_value_ref(1) == xptr - excvalue = self.cpu.grab_exc_value() - assert not excvalue + assert self.cpu.get_latest_value_ref(0) == xptr self.cpu.set_future_value_int(0, 0) self.cpu.execute_token(loop.token) - assert self.cpu.get_latest_value_int(0) == 1 - excvalue = self.cpu.grab_exc_value() - assert not excvalue + assert self.cpu.get_latest_value_ref(0) == lltype.nullptr( + llmemory.GCREF.TO) - ytp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - ytp.subclassrange_min = 2 - ytp.subclassrange_max = 2 - assert rclass.ll_issubclass(ytp, xtp) - Y = lltype.GcStruct('Y', ('parent', rclass.OBJECT), - hints={'vtable': ytp._obj}) - yptr = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(Y)) - - # guard_exception uses an exact match - exc_tp = ytp - exc_ptr = yptr - loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) - assert self.cpu.get_latest_value_int(0) == 1 - excvalue = self.cpu.grab_exc_value() - assert excvalue == yptr - assert not self.cpu.grab_exc_value() # cleared - - exc_tp = xtp - exc_ptr = xptr - ops = ''' - [i0] - i1 = same_as(1) - call(ConstClass(fptr), i0, descr=calldescr) - guard_no_exception() [i1] - finish(0) - ''' - loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) - assert self.cpu.get_latest_value_int(0) == 1 - excvalue = self.cpu.grab_exc_value() - assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) - assert self.cpu.get_latest_value_int(0) == 0 - excvalue = self.cpu.grab_exc_value() - assert not excvalue + assert not hasattr(self.cpu, 'grab_exc_value') # old interface def 
test_cond_call_gc_wb(self): def func_void(a, b): Modified: pypy/branch/saner-guard-exc/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/metainterp/executor.py Thu Sep 9 16:45:29 2010 @@ -293,6 +293,8 @@ rop.COND_CALL_GC_WB, rop.DEBUG_MERGE_POINT, rop.SETARRAYITEM_RAW, + rop.LAST_EXC, + rop.CLEAR_EXC, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) Modified: pypy/branch/saner-guard-exc/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/saner-guard-exc/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/saner-guard-exc/pypy/jit/metainterp/test/test_basic.py Thu Sep 9 16:45:29 2010 @@ -375,7 +375,8 @@ return externfn(n, n+1) res = self.interp_operations(f, [6]) assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) + self.check_operations_history(int_add=1, int_mul=0, call=1, + last_exc=0, clear_exc=0) def test_residual_call_pure(self): def externfn(x, y): From afa at codespeak.net Thu Sep 9 16:47:48 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 9 Sep 2010 16:47:48 +0200 (CEST) Subject: [pypy-svn] r76973 - pypy/trunk/pypy/rpython/module Message-ID: <20100909144748.98F86282B9C@codespeak.net> Author: afa Date: Thu Sep 9 16:47:47 2010 New Revision: 76973 Modified: pypy/trunk/pypy/rpython/module/ll_os_stat.py Log: Fix translation on Windows Modified: pypy/trunk/pypy/rpython/module/ll_os_stat.py ============================================================================== --- pypy/trunk/pypy/rpython/module/ll_os_stat.py (original) +++ pypy/trunk/pypy/rpython/module/ll_os_stat.py Thu Sep 9 16:47:47 2010 @@ -437,5 +437,5 @@ def time_t_to_FILE_TIME(time, filetime): ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & ((1 << 32) - 1)) + filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) From afa at codespeak.net Thu Sep 9 16:52:19 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 9 Sep 2010 16:52:19 +0200 (CEST) Subject: [pypy-svn] r76974 - in pypy/trunk/pypy: module/_winreg rlib rpython/lltypesystem rpython/module translator/c Message-ID: <20100909145219.DC82E282B9C@codespeak.net> Author: afa Date: Thu Sep 9 16:52:18 2010 New Revision: 76974 Modified: pypy/trunk/pypy/module/_winreg/interp_winreg.py pypy/trunk/pypy/rlib/rmmap.py pypy/trunk/pypy/rlib/rwin32.py pypy/trunk/pypy/rpython/lltypesystem/rffi.py pypy/trunk/pypy/rpython/module/ll_os.py pypy/trunk/pypy/rpython/module/ll_win32file.py pypy/trunk/pypy/translator/c/database.py Log: Turn rwin32.HANDLE into an Opaque type that emits "HANDLE" in C. this removes a lot of compilation warnings on Windows. 
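The pattern is the same as in the _ssl change below (r76975): declare the foreign type with rffi.COpaquePtr so the generated C uses the real typedef, and go through an integer cast wherever app-level code needs a number. A minimal sketch with illustrative names, not lifted verbatim from the diff:

    from pypy.rpython.lltypesystem import rffi

    # sketch: an opaque Windows HANDLE, as now declared in rwin32
    HANDLE = rffi.COpaquePtr(typedef='HANDLE')   # the C source will say 'HANDLE'
    NULL_HANDLE = rffi.cast(HANDLE, 0)
    INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1)

    def handle_as_int(handle):
        # app-level code (e.g. _winreg's descr_int) wants an integer view;
        # cast through SIZE_T instead of assuming HANDLE is an integer type
        return rffi.cast(rffi.SIZE_T, handle)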
Modified: pypy/trunk/pypy/module/_winreg/interp_winreg.py ============================================================================== --- pypy/trunk/pypy/module/_winreg/interp_winreg.py (original) +++ pypy/trunk/pypy/module/_winreg/interp_winreg.py Thu Sep 9 16:52:18 2010 @@ -20,16 +20,19 @@ self.Close(space) descr_del.unwrap_spec = ['self', ObjSpace] + def as_int(self): + return rffi.cast(rffi.SIZE_T, self.hkey) + def descr_nonzero(self, space): - return space.wrap(self.hkey != 0) + return space.wrap(self.as_int() != 0) descr_nonzero.unwrap_spec = ['self', ObjSpace] def descr_repr(self, space): - return space.wrap("" % (self.hkey,)) + return space.wrap("" % (self.as_int(),)) descr_repr.unwrap_spec = ['self', ObjSpace] def descr_int(self, space): - return space.wrap(self.hkey) + return space.wrap(self.as_int()) descr_int.unwrap_spec = ['self', ObjSpace] def Close(self, space): @@ -49,12 +52,13 @@ need the underlying win32 handle to exist beyond the lifetime of the handle object. On 64 bit windows, the result of this function is a long integer""" - hkey = self.hkey - self.hkey = 0 - return space.wrap(hkey) + key = self.as_int() + self.hkey = rwin32.NULL_HANDLE + return space.wrap(key) Detach.unwrap_spec = ['self', ObjSpace] -def new_HKEY(space, w_subtype, hkey): +def new_HKEY(space, w_subtype, key): + hkey = rffi.cast(rwinreg.HKEY, key) return space.wrap(W_HKEY(hkey)) descr_HKEY_new = interp2app(new_HKEY, unwrap_spec=[ObjSpace, W_Root, int]) @@ -98,9 +102,9 @@ elif isinstance(w_hkey, W_HKEY): return w_hkey.hkey elif space.is_true(space.isinstance(w_hkey, space.w_int)): - return space.int_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey)) elif space.is_true(space.isinstance(w_hkey, space.w_long)): - return space.uint_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey)) else: errstring = space.wrap("The object is not a PyHKEY object") raise OperationError(space.w_TypeError, errstring) @@ -631,8 +635,8 @@ null_dword, ft) if ret != 0: raiseWindowsError(space, ret, 'RegQueryInfoKey') - l = (ft[0].c_dwLowDateTime + - (ft[0].c_dwHighDateTime << 32)) + l = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) + + lltype.r_longlong(ft[0].c_dwLowDateTime)) return space.newtuple([space.wrap(nSubKeys[0]), space.wrap(nValues[0]), space.wrap(l)]) Modified: pypy/trunk/pypy/rlib/rmmap.py ============================================================================== --- pypy/trunk/pypy/rlib/rmmap.py (original) +++ pypy/trunk/pypy/rlib/rmmap.py Thu Sep 9 16:52:18 2010 @@ -72,6 +72,7 @@ setattr(CConfig, name, rffi_platform.ConstantInteger(name)) from pypy.rlib.rwin32 import HANDLE, LPHANDLE + from pypy.rlib.rwin32 import NULL_HANDLE, INVALID_HANDLE_VALUE from pypy.rlib.rwin32 import DWORD, WORD, DWORD_PTR, LPDWORD from pypy.rlib.rwin32 import BOOL, LPVOID, LPCVOID, LPCSTR, SIZE_T from pypy.rlib.rwin32 import INT, LONG, PLONG @@ -183,7 +184,7 @@ ##_get_osfhandle = winexternal('_get_osfhandle', [INT], LONG) # casting from int to handle did not work, so I changed this # but it should not be so! 
- _get_osfhandle = winexternal('_get_osfhandle', [INT], HANDLE) + _get_osfhandle = winexternal('_get_osfhandle', [INT], rffi.INTPTR_T) GetLastError = winexternal('GetLastError', [], DWORD) VirtualAlloc = winexternal('VirtualAlloc', [rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD], @@ -228,8 +229,7 @@ def _get_error_no(): return rffi.cast(lltype.Signed, GetLastError()) - NULL_HANDLE = rffi.cast(HANDLE, 0) - INVALID_HANDLE = rffi.cast(HANDLE, -1) + INVALID_HANDLE = INVALID_HANDLE_VALUE PAGESIZE = _get_page_size() NULL = lltype.nullptr(PTR.TO) @@ -684,12 +684,11 @@ # assume -1 and 0 both mean invalid file descriptor # to 'anonymously' map memory. if fileno != -1 and fileno != 0: - fh = _get_osfhandle(fileno) - # parts of the C library use HANDLE, others just ints - # XXX hack - made _get_osfhandle compatible - if fh == INVALID_HANDLE: + res = _get_osfhandle(fileno) + if res == rffi.cast(rffi.SSIZE_T, INVALID_HANDLE): errno = _get_error_no() raise OSError(errno, os.strerror(errno)) + fh = rffi.cast(HANDLE, res) # Win9x appears to need us seeked to zero # SEEK_SET = 0 # libc._lseek(fileno, 0, SEEK_SET) Modified: pypy/trunk/pypy/rlib/rwin32.py ============================================================================== --- pypy/trunk/pypy/rlib/rwin32.py (original) +++ pypy/trunk/pypy/rlib/rwin32.py Thu Sep 9 16:52:18 2010 @@ -81,9 +81,10 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='win') if WIN32: - HANDLE = rffi.ULONG + HANDLE = rffi.COpaquePtr(typedef='HANDLE') LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE + NULL_HANDLE = rffi.cast(HANDLE, 0) INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) Modified: pypy/trunk/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/rffi.py Thu Sep 9 16:52:18 2010 @@ -358,9 +358,11 @@ if os.name != 'nt': TYPES.append('mode_t') TYPES.append('pid_t') + TYPES.append('ssize_t') else: MODE_T = lltype.Signed PID_T = lltype.Signed + SSIZE_T = lltype.Signed def populate_inttypes(): names = [] @@ -415,6 +417,7 @@ # ULONGLONG r_ulonglong # WCHAR_T r_wchar_t # SIZE_T r_size_t +# SSIZE_T r_ssize_t # TIME_T r_time_t # -------------------------------------------------------------------- # Note that rffi.r_int is not necessarily the same as @@ -535,6 +538,8 @@ # (use SIGNEDCHAR or UCHAR for the small integer types) CHAR = lltype.Char +INTPTR_T = SSIZE_T + # double DOUBLE = lltype.Float Modified: pypy/trunk/pypy/rpython/module/ll_os.py ============================================================================== --- pypy/trunk/pypy/rpython/module/ll_os.py (original) +++ pypy/trunk/pypy/rpython/module/ll_os.py Thu Sep 9 16:52:18 2010 @@ -1046,7 +1046,7 @@ rffi.VOIDP, rwin32.DWORD], rwin32.BOOL) - _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.ULONG, + _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.INTPTR_T, rffi.INT], rffi.INT) null = lltype.nullptr(rffi.VOIDP.TO) @@ -1059,8 +1059,8 @@ error = 0 else: error = rwin32.GetLastError() - hread = pread[0] - hwrite = pwrite[0] + hread = rffi.cast(rffi.INTPTR_T, pread[0]) + hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) lltype.free(pwrite, flavor='raw') lltype.free(pread, flavor='raw') if error: Modified: pypy/trunk/pypy/rpython/module/ll_win32file.py ============================================================================== --- pypy/trunk/pypy/rpython/module/ll_win32file.py (original) 
+++ pypy/trunk/pypy/rpython/module/ll_win32file.py Thu Sep 9 16:52:18 2010 @@ -265,7 +265,8 @@ hFile = CreateFile(path, FILE_WRITE_ATTRIBUTES, 0, None, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, 0) + FILE_FLAG_BACKUP_SEMANTICS, + rwin32.NULL_HANDLE) if hFile == rwin32.INVALID_HANDLE_VALUE: raise rwin32.lastWindowsError() ctime = lltype.nullptr(rwin32.FILETIME) Modified: pypy/trunk/pypy/translator/c/database.py ============================================================================== --- pypy/trunk/pypy/translator/c/database.py (original) +++ pypy/trunk/pypy/translator/c/database.py Thu Sep 9 16:52:18 2010 @@ -2,7 +2,7 @@ Primitive, Ptr, typeOf, RuntimeTypeInfo, \ Struct, Array, FuncType, PyObject, Void, \ ContainerType, OpaqueType, FixedSizeArray, _uninitialized -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant from pypy.rpython.lltypesystem import llgroup @@ -183,6 +183,12 @@ if isinstance(T, Primitive) or T == GCREF: return PrimitiveName[T](obj, self) elif isinstance(T, Ptr): + if (isinstance(T.TO, OpaqueType) and + T.TO.hints.get('c_pointer_typedef') is not None): + if obj._obj is not None: + value = rffi.cast(rffi.SSIZE_T, obj) + return '((%s) %s)' % (cdecl(self.gettype(T), ''), + self.get(value)) if obj: # test if the ptr is non-NULL try: container = obj._obj From afa at codespeak.net Thu Sep 9 18:02:38 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 9 Sep 2010 18:02:38 +0200 (CEST) Subject: [pypy-svn] r76975 - pypy/trunk/pypy/module/_ssl Message-ID: <20100909160238.D85BF282B9C@codespeak.net> Author: afa Date: Thu Sep 9 18:02:37 2010 New Revision: 76975 Modified: pypy/trunk/pypy/module/_ssl/interp_ssl.py Log: Use opaque pointers to SSL structures (instead of rffi.VOIDP which becomes char*) This remove almost all compilation warnings in interp_ssl. Modified: pypy/trunk/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/trunk/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/trunk/pypy/module/_ssl/interp_ssl.py Thu Sep 9 18:02:37 2010 @@ -18,6 +18,9 @@ # need of winsock2. Remove this when separate compilation is # available... 'winsock2.h', + # wincrypt.h defines X509_NAME, include it here + # so that openssl/ssl.h can repair this nonsense. 
+ 'wincrypt.h', 'openssl/ssl.h', 'openssl/err.h'] else: @@ -88,18 +91,12 @@ globals()[k] = v # opaque structures -SSL_METHOD = rffi.VOIDP -SSL_CTX = rffi.VOIDP -SSL = rffi.VOIDP -BIO = rffi.VOIDP -X509 = rffi.VOIDP -X509_NAME = rffi.VOIDP - -SSL_CTX_P = rffi.CArrayPtr(SSL_CTX) -BIO_P = rffi.CArrayPtr(BIO) -SSL_P = rffi.CArrayPtr(SSL) -X509_P = rffi.CArrayPtr(X509) -X509_NAME_P = rffi.CArrayPtr(X509_NAME) +SSL_METHOD = rffi.COpaquePtr('SSL_METHOD') +SSL_CTX = rffi.COpaquePtr('SSL_CTX') +SSL = rffi.COpaquePtr('SSL') +BIO = rffi.COpaquePtr('BIO') +X509 = rffi.COpaquePtr('X509') +X509_NAME = rffi.COpaquePtr('X509_NAME') HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500f @@ -125,33 +122,33 @@ ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void) ssl_external('RAND_status', [], rffi.INT) ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_new', [rffi.CArrayPtr(SSL_METHOD)], SSL_CTX_P) -ssl_external('SSLv23_method', [], rffi.CArrayPtr(SSL_METHOD)) -ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX_P, rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_ctrl', [SSL_CTX_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_CTX_set_verify', [SSL_CTX_P, rffi.INT, rffi.VOIDP], lltype.Void) -ssl_external('SSL_new', [SSL_CTX_P], SSL_P) -ssl_external('SSL_set_fd', [SSL_P, rffi.INT], rffi.INT) -ssl_external('BIO_ctrl', [BIO_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_get_rbio', [SSL_P], BIO_P) -ssl_external('SSL_get_wbio', [SSL_P], BIO_P) -ssl_external('SSL_set_connect_state', [SSL_P], lltype.Void) -ssl_external('SSL_connect', [SSL_P], rffi.INT) -ssl_external('SSL_get_error', [SSL_P, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_new', [SSL_METHOD], SSL_CTX) +ssl_external('SSLv23_method', [], SSL_METHOD) +ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_ctrl', [SSL_CTX, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_CTX_set_verify', [SSL_CTX, rffi.INT, rffi.VOIDP], lltype.Void) +ssl_external('SSL_new', [SSL_CTX], SSL) +ssl_external('SSL_set_fd', [SSL, rffi.INT], rffi.INT) +ssl_external('BIO_ctrl', [BIO, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_get_rbio', [SSL], BIO) +ssl_external('SSL_get_wbio', [SSL], BIO) +ssl_external('SSL_set_connect_state', [SSL], lltype.Void) +ssl_external('SSL_connect', [SSL], rffi.INT) +ssl_external('SSL_get_error', [SSL, rffi.INT], rffi.INT) ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) -ssl_external('SSL_get_peer_certificate', [SSL_P], X509_P) -ssl_external('X509_get_subject_name', [X509_P], X509_NAME_P) -ssl_external('X509_get_issuer_name', [X509_P], X509_NAME_P) -ssl_external('X509_NAME_oneline', [X509_NAME_P, rffi.CCHARP, rffi.INT], rffi.CCHARP) -ssl_external('X509_free', [X509_P], lltype.Void) -ssl_external('SSL_free', [SSL_P], lltype.Void) -ssl_external('SSL_CTX_free', [SSL_CTX_P], lltype.Void) -ssl_external('SSL_write', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_pending', [SSL_P], rffi.INT) -ssl_external('SSL_read', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_get_peer_certificate', [SSL], X509) +ssl_external('X509_get_subject_name', [X509], X509_NAME) +ssl_external('X509_get_issuer_name', [X509], X509_NAME) 
+ssl_external('X509_NAME_oneline', [X509_NAME, rffi.CCHARP, rffi.INT], rffi.CCHARP) +ssl_external('X509_free', [X509], lltype.Void) +ssl_external('SSL_free', [SSL], lltype.Void) +ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void) +ssl_external('SSL_write', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_pending', [SSL], rffi.INT) +ssl_external('SSL_read', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) def ssl_error(space, msg): w_module = space.getbuiltinmodule('_ssl') @@ -212,9 +209,9 @@ def __init__(self, space): self.space = space self.w_socket = None - self.ctx = lltype.nullptr(SSL_CTX_P.TO) - self.ssl = lltype.nullptr(SSL_P.TO) - self.server_cert = lltype.nullptr(X509_P.TO) + self.ctx = lltype.nullptr(SSL_CTX.TO) + self.ssl = lltype.nullptr(SSL.TO) + self.server_cert = lltype.nullptr(X509.TO) self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') self._server[0] = '\0' self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') From hakanardo at codespeak.net Thu Sep 9 18:10:23 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 9 Sep 2010 18:10:23 +0200 (CEST) Subject: [pypy-svn] r76976 - in pypy/branch/jit-bounds: . pypy/config pypy/jit/backend/llsupport pypy/jit/backend/llsupport/test pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/module/_ssl pypy/module/_winreg pypy/module/array/benchmark pypy/module/array/test pypy/module/pypyjit/test pypy/objspace/flow pypy/rlib pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gctransform pypy/rpython/module pypy/rpython/test pypy/rpython/tool pypy/rpython/tool/test pypy/translator/c pypy/translator/c/gcc pypy/translator/c/gcc/test pypy/translator/c/gcc/test/elf64 pypy/translator/platform Message-ID: <20100909161023.9554D282B9C@codespeak.net> Author: hakanardo Date: Thu Sep 9 18:10:18 2010 New Revision: 76976 Added: pypy/branch/jit-bounds/pypy/translator/c/gcc/test/elf64/ - copied from r76975, pypy/trunk/pypy/translator/c/gcc/test/elf64/ Modified: pypy/branch/jit-bounds/ (props changed) pypy/branch/jit-bounds/pypy/config/translationoption.py pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py pypy/branch/jit-bounds/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py pypy/branch/jit-bounds/pypy/jit/backend/x86/regalloc.py pypy/branch/jit-bounds/pypy/jit/backend/x86/regloc.py pypy/branch/jit-bounds/pypy/jit/backend/x86/rx86.py pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_gc_integration.py pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_ztranslation.py pypy/branch/jit-bounds/pypy/jit/metainterp/jitdriver.py pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (props changed) pypy/branch/jit-bounds/pypy/jit/metainterp/warmspot.py pypy/branch/jit-bounds/pypy/module/_ssl/interp_ssl.py pypy/branch/jit-bounds/pypy/module/_winreg/interp_winreg.py pypy/branch/jit-bounds/pypy/module/array/benchmark/Makefile (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimg.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/loop.c (props changed) 
pypy/branch/jit-bounds/pypy/module/array/benchmark/sum.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.c (props changed) pypy/branch/jit-bounds/pypy/module/array/benchmark/sumtst.py (props changed) pypy/branch/jit-bounds/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/jit-bounds/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/jit-bounds/pypy/objspace/flow/specialcase.py pypy/branch/jit-bounds/pypy/rlib/rmmap.py pypy/branch/jit-bounds/pypy/rlib/rsha.py pypy/branch/jit-bounds/pypy/rlib/rwin32.py pypy/branch/jit-bounds/pypy/rlib/rzipfile.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_rffi.py pypy/branch/jit-bounds/pypy/rpython/memory/gctransform/asmgcroot.py pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py pypy/branch/jit-bounds/pypy/rpython/module/ll_os.py pypy/branch/jit-bounds/pypy/rpython/module/ll_os_stat.py pypy/branch/jit-bounds/pypy/rpython/module/ll_win32file.py pypy/branch/jit-bounds/pypy/rpython/test/test_rint.py pypy/branch/jit-bounds/pypy/rpython/tool/rffi_platform.py pypy/branch/jit-bounds/pypy/rpython/tool/test/test_rffi_platform.py pypy/branch/jit-bounds/pypy/translator/c/database.py pypy/branch/jit-bounds/pypy/translator/c/gcc/instruction.py pypy/branch/jit-bounds/pypy/translator/c/gcc/test/conftest.py pypy/branch/jit-bounds/pypy/translator/c/gcc/test/test_trackgcroot.py pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py pypy/branch/jit-bounds/pypy/translator/platform/linux.py pypy/branch/jit-bounds/pypy/translator/platform/posix.py Log: svn merge -r76924:r76975 svn+ssh://hakanardo at codespeak.net/svn/pypy/trunk Modified: pypy/branch/jit-bounds/pypy/config/translationoption.py ============================================================================== --- pypy/branch/jit-bounds/pypy/config/translationoption.py (original) +++ pypy/branch/jit-bounds/pypy/config/translationoption.py Thu Sep 9 18:10:18 2010 @@ -343,11 +343,7 @@ } def final_check_config(config): - # For now, 64-bit JIT requires boehm. You have to say it explicitly - # with --gc=boehm, so that you don't get boehm by mistake. - if IS_64_BITS: - if config.translation.jit and config.translation.gc != 'boehm': - raise ConfigError("for now, 64-bit JIT requires --gc=boehm") + pass def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. Modified: pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/llsupport/gc.py Thu Sep 9 18:10:18 2010 @@ -251,13 +251,25 @@ if oldgcmap: lltype.free(oldgcmap, flavor='raw') - def get_basic_shape(self): - return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) - chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) - chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) - chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) - chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) - chr(0)] + def get_basic_shape(self, is_64_bit=False): + # XXX: Should this code even really know about stack frame layout of + # the JIT? 
+ if is_64_bit: + return [chr(self.LOC_EBP_PLUS | 8), + chr(self.LOC_EBP_MINUS | 8), + chr(self.LOC_EBP_MINUS | 16), + chr(self.LOC_EBP_MINUS | 24), + chr(self.LOC_EBP_MINUS | 32), + chr(self.LOC_EBP_MINUS | 40), + chr(self.LOC_EBP_PLUS | 0), + chr(0)] + else: + return [chr(self.LOC_EBP_PLUS | 4), # return addr: at 4(%ebp) + chr(self.LOC_EBP_MINUS | 4), # saved %ebx: at -4(%ebp) + chr(self.LOC_EBP_MINUS | 8), # saved %esi: at -8(%ebp) + chr(self.LOC_EBP_MINUS | 12), # saved %edi: at -12(%ebp) + chr(self.LOC_EBP_PLUS | 0), # saved %ebp: at (%ebp) + chr(0)] def _encode_num(self, shape, number): assert number >= 0 @@ -276,17 +288,9 @@ num = self.LOC_EBP_MINUS | (-offset) self._encode_num(shape, num) - def add_ebx(self, shape): - shape.append(chr(self.LOC_REG | 4)) - - def add_esi(self, shape): - shape.append(chr(self.LOC_REG | 8)) - - def add_edi(self, shape): - shape.append(chr(self.LOC_REG | 12)) - - def add_ebp(self, shape): - shape.append(chr(self.LOC_REG | 16)) + def add_callee_save_reg(self, shape, reg_index): + assert reg_index > 0 + shape.append(chr(self.LOC_REG | (reg_index << 2))) def compress_callshape(self, shape): # Similar to compress_callshape() in trackgcroot.py. Modified: pypy/branch/jit-bounds/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 9 18:10:18 2010 @@ -73,16 +73,16 @@ gcrootmap.add_ebp_offset(shape, num1) gcrootmap.add_ebp_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) - gcrootmap.add_ebx(shape) + gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4]) - gcrootmap.add_esi(shape) + gcrootmap.add_callee_save_reg(shape, 2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8]) - gcrootmap.add_edi(shape) + gcrootmap.add_callee_save_reg(shape, 3) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12]) - gcrootmap.add_ebp(shape) + gcrootmap.add_callee_save_reg(shape, 4) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, 4, 8, 12, 16]) # Modified: pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/test/runner_test.py Thu Sep 9 18:10:18 2010 @@ -1777,7 +1777,7 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -1816,7 +1816,7 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) ops = ''' Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/assembler.py Thu Sep 9 18:10:18 2010 @@ -273,7 +273,8 @@ if IS_X86_32: self.mc.MOV_sr(WORD, 
edx.value) # save it as the new argument elif IS_X86_64: - # FIXME: We can't just clobber rdi like this, can we? + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. self.mc.MOV_rr(edi.value, edx.value) addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() @@ -1256,8 +1257,12 @@ sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) type_info_group = llop.gc_get_type_info_group(llmemory.Address) type_info_group = rffi.cast(lltype.Signed, type_info_group) - expected_typeid = (classptr - sizeof_ti - type_info_group) >> 2 - self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + expected_typeid = classptr - sizeof_ti - type_info_group + if IS_X86_32: + expected_typeid >>= 2 + self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + elif IS_X86_64: + self.mc.CMP32_mi((locs[0].value, 0), expected_typeid) def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs, ign_2): self.mc.ensure_bytes_available(256) Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/regalloc.py Thu Sep 9 18:10:18 2010 @@ -26,6 +26,12 @@ no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + esi: 2, + edi: 3, + } + def call_result_location(self, v): return eax @@ -47,6 +53,13 @@ no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] + REGLOC_TO_GCROOTMAP_REG_INDEX = { + ebx: 1, + r12: 2, + r13: 3, + r14: 4, + r15: 5, + } class FloatConstants(object): BASE_CONSTANT_SIZE = 1000 @@ -652,13 +665,11 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - portal_calldescr = self.assembler.cpu.portal_calldescr - size = portal_calldescr.get_result_size(self.translate_support_code) - # descr = op.descr assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None + size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.args[vable_index]) @@ -694,23 +705,18 @@ def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) gc_ll_descr = self.assembler.cpu.gc_ll_descr - tmp0 = TempBox() self.rm.force_allocate_reg(op.result, selected_reg=eax) - self.rm.force_allocate_reg(tmp0, selected_reg=edx) - # XXX about the next 10 lines: why not just say - # force_allocate_reg(tmp1, selected_reg=ecx)????? - for v, reg in self.rm.reg_bindings.items(): - if reg is ecx: - to_sync = v - break - else: - to_sync = None - if to_sync is not None: - self.rm._sync_var(to_sync) - del self.rm.reg_bindings[to_sync] - self.rm.free_regs.append(ecx) - # we need to do it here, so edx is not in reg_bindings - self.rm.possibly_free_var(tmp0) + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + self.assembler.malloc_cond_fixedsize( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), @@ -962,7 +968,7 @@ pass def get_mark_gc_roots(self, gcrootmap): - shape = gcrootmap.get_basic_shape() + shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) @@ -971,15 +977,8 @@ if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - if reg is ebx: - gcrootmap.add_ebx(shape) - elif reg is esi: - gcrootmap.add_esi(shape) - elif reg is edi: - gcrootmap.add_edi(shape) - else: - print "[get_mark_gc_roots] bogus register", reg - assert False + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape) def consider_force_token(self, op): Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/regloc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/regloc.py Thu Sep 9 18:10:18 2010 @@ -1,7 +1,7 @@ from pypy.jit.metainterp.history import AbstractValue, ConstInt from pypy.jit.backend.x86 import rx86 from pypy.rlib.unroll import unrolling_iterable -from pypy.jit.backend.x86.arch import WORD +from pypy.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import intmask Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/rx86.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/rx86.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/rx86.py Thu Sep 9 18:10:18 2010 @@ -462,6 +462,8 @@ CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) CMP_rj = insn(rex_w, '\x3B', register(1, 8), '\x05', immediate(2)) + CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) + AND8_rr = insn(rex_w, '\x20', byte_register(1), byte_register(2,8), '\xC0') OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0') Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_gc_integration.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_gc_integration.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_gc_integration.py Thu Sep 9 18:10:18 2010 @@ -26,16 +26,13 @@ CPU = getcpuclass() class MockGcRootMap(object): - def get_basic_shape(self): + def get_basic_shape(self, is_64_bit): return ['shape'] def add_ebp_offset(self, shape, offset): shape.append(offset) - def add_ebx(self, shape): - shape.append('ebx') - def add_esi(self, shape): - shape.append('esi') - def add_edi(self, shape): - shape.append('edi') + def add_callee_save_reg(self, shape, reg_index): + index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } + shape.append(index_to_name[reg_index]) def compress_callshape(self, shape): assert shape[0] == 'shape' return ['compressed'] + shape[1:] Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_zrpy_gc.py 
============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_zrpy_gc.py Thu Sep 9 18:10:18 2010 @@ -128,10 +128,6 @@ class TestCompileHybrid(object): def setup_class(cls): - if IS_X86_64: - # No hybrid GC on 64-bit for the time being - py.test.skip() - funcs = [] name_to_func = {} for fullname in dir(cls): Modified: pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_ztranslation.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/branch/jit-bounds/pypy/jit/backend/x86/test/test_ztranslation.py Thu Sep 9 18:10:18 2010 @@ -125,10 +125,6 @@ return t def test_external_exception_handling_translates(self): - # FIXME - if IS_X86_64: - import py.test; py.test.skip() - jitdriver = JitDriver(greens = [], reds = ['n', 'total']) class ImDone(Exception): Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/jitdriver.py Thu Sep 9 18:10:18 2010 @@ -8,6 +8,7 @@ # self.portal_graph ... pypy.jit.metainterp.warmspot # self.portal_runner_ptr ... pypy.jit.metainterp.warmspot # self.portal_runner_adr ... pypy.jit.metainterp.warmspot + # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-bounds/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-bounds/pypy/jit/metainterp/warmspot.py Thu Sep 9 18:10:18 2010 @@ -648,7 +648,7 @@ jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE, ll_portal_runner) jd.portal_runner_adr = llmemory.cast_ptr_to_adr(jd.portal_runner_ptr) - self.cpu.portal_calldescr = self.cpu.calldescrof( + jd.portal_calldescr = self.cpu.calldescrof( jd._PTR_PORTAL_FUNCTYPE.TO, jd._PTR_PORTAL_FUNCTYPE.TO.ARGS, jd._PTR_PORTAL_FUNCTYPE.TO.RESULT) Modified: pypy/branch/jit-bounds/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/branch/jit-bounds/pypy/module/_ssl/interp_ssl.py Thu Sep 9 18:10:18 2010 @@ -18,6 +18,9 @@ # need of winsock2. Remove this when separate compilation is # available... 'winsock2.h', + # wincrypt.h defines X509_NAME, include it here + # so that openssl/ssl.h can repair this nonsense. 
+ 'wincrypt.h', 'openssl/ssl.h', 'openssl/err.h'] else: @@ -88,18 +91,12 @@ globals()[k] = v # opaque structures -SSL_METHOD = rffi.VOIDP -SSL_CTX = rffi.VOIDP -SSL = rffi.VOIDP -BIO = rffi.VOIDP -X509 = rffi.VOIDP -X509_NAME = rffi.VOIDP - -SSL_CTX_P = rffi.CArrayPtr(SSL_CTX) -BIO_P = rffi.CArrayPtr(BIO) -SSL_P = rffi.CArrayPtr(SSL) -X509_P = rffi.CArrayPtr(X509) -X509_NAME_P = rffi.CArrayPtr(X509_NAME) +SSL_METHOD = rffi.COpaquePtr('SSL_METHOD') +SSL_CTX = rffi.COpaquePtr('SSL_CTX') +SSL = rffi.COpaquePtr('SSL') +BIO = rffi.COpaquePtr('BIO') +X509 = rffi.COpaquePtr('X509') +X509_NAME = rffi.COpaquePtr('X509_NAME') HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500f @@ -125,33 +122,33 @@ ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void) ssl_external('RAND_status', [], rffi.INT) ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_new', [rffi.CArrayPtr(SSL_METHOD)], SSL_CTX_P) -ssl_external('SSLv23_method', [], rffi.CArrayPtr(SSL_METHOD)) -ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX_P, rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_ctrl', [SSL_CTX_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_CTX_set_verify', [SSL_CTX_P, rffi.INT, rffi.VOIDP], lltype.Void) -ssl_external('SSL_new', [SSL_CTX_P], SSL_P) -ssl_external('SSL_set_fd', [SSL_P, rffi.INT], rffi.INT) -ssl_external('BIO_ctrl', [BIO_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_get_rbio', [SSL_P], BIO_P) -ssl_external('SSL_get_wbio', [SSL_P], BIO_P) -ssl_external('SSL_set_connect_state', [SSL_P], lltype.Void) -ssl_external('SSL_connect', [SSL_P], rffi.INT) -ssl_external('SSL_get_error', [SSL_P, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_new', [SSL_METHOD], SSL_CTX) +ssl_external('SSLv23_method', [], SSL_METHOD) +ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_ctrl', [SSL_CTX, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_CTX_set_verify', [SSL_CTX, rffi.INT, rffi.VOIDP], lltype.Void) +ssl_external('SSL_new', [SSL_CTX], SSL) +ssl_external('SSL_set_fd', [SSL, rffi.INT], rffi.INT) +ssl_external('BIO_ctrl', [BIO, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_get_rbio', [SSL], BIO) +ssl_external('SSL_get_wbio', [SSL], BIO) +ssl_external('SSL_set_connect_state', [SSL], lltype.Void) +ssl_external('SSL_connect', [SSL], rffi.INT) +ssl_external('SSL_get_error', [SSL, rffi.INT], rffi.INT) ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) -ssl_external('SSL_get_peer_certificate', [SSL_P], X509_P) -ssl_external('X509_get_subject_name', [X509_P], X509_NAME_P) -ssl_external('X509_get_issuer_name', [X509_P], X509_NAME_P) -ssl_external('X509_NAME_oneline', [X509_NAME_P, rffi.CCHARP, rffi.INT], rffi.CCHARP) -ssl_external('X509_free', [X509_P], lltype.Void) -ssl_external('SSL_free', [SSL_P], lltype.Void) -ssl_external('SSL_CTX_free', [SSL_CTX_P], lltype.Void) -ssl_external('SSL_write', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_pending', [SSL_P], rffi.INT) -ssl_external('SSL_read', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_get_peer_certificate', [SSL], X509) +ssl_external('X509_get_subject_name', [X509], X509_NAME) +ssl_external('X509_get_issuer_name', [X509], X509_NAME) 
+ssl_external('X509_NAME_oneline', [X509_NAME, rffi.CCHARP, rffi.INT], rffi.CCHARP) +ssl_external('X509_free', [X509], lltype.Void) +ssl_external('SSL_free', [SSL], lltype.Void) +ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void) +ssl_external('SSL_write', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_pending', [SSL], rffi.INT) +ssl_external('SSL_read', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) def ssl_error(space, msg): w_module = space.getbuiltinmodule('_ssl') @@ -212,9 +209,9 @@ def __init__(self, space): self.space = space self.w_socket = None - self.ctx = lltype.nullptr(SSL_CTX_P.TO) - self.ssl = lltype.nullptr(SSL_P.TO) - self.server_cert = lltype.nullptr(X509_P.TO) + self.ctx = lltype.nullptr(SSL_CTX.TO) + self.ssl = lltype.nullptr(SSL.TO) + self.server_cert = lltype.nullptr(X509.TO) self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') self._server[0] = '\0' self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') Modified: pypy/branch/jit-bounds/pypy/module/_winreg/interp_winreg.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/_winreg/interp_winreg.py (original) +++ pypy/branch/jit-bounds/pypy/module/_winreg/interp_winreg.py Thu Sep 9 18:10:18 2010 @@ -20,16 +20,19 @@ self.Close(space) descr_del.unwrap_spec = ['self', ObjSpace] + def as_int(self): + return rffi.cast(rffi.SIZE_T, self.hkey) + def descr_nonzero(self, space): - return space.wrap(self.hkey != 0) + return space.wrap(self.as_int() != 0) descr_nonzero.unwrap_spec = ['self', ObjSpace] def descr_repr(self, space): - return space.wrap("" % (self.hkey,)) + return space.wrap("" % (self.as_int(),)) descr_repr.unwrap_spec = ['self', ObjSpace] def descr_int(self, space): - return space.wrap(self.hkey) + return space.wrap(self.as_int()) descr_int.unwrap_spec = ['self', ObjSpace] def Close(self, space): @@ -49,12 +52,13 @@ need the underlying win32 handle to exist beyond the lifetime of the handle object. 
On 64 bit windows, the result of this function is a long integer""" - hkey = self.hkey - self.hkey = 0 - return space.wrap(hkey) + key = self.as_int() + self.hkey = rwin32.NULL_HANDLE + return space.wrap(key) Detach.unwrap_spec = ['self', ObjSpace] -def new_HKEY(space, w_subtype, hkey): +def new_HKEY(space, w_subtype, key): + hkey = rffi.cast(rwinreg.HKEY, key) return space.wrap(W_HKEY(hkey)) descr_HKEY_new = interp2app(new_HKEY, unwrap_spec=[ObjSpace, W_Root, int]) @@ -98,9 +102,9 @@ elif isinstance(w_hkey, W_HKEY): return w_hkey.hkey elif space.is_true(space.isinstance(w_hkey, space.w_int)): - return space.int_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey)) elif space.is_true(space.isinstance(w_hkey, space.w_long)): - return space.uint_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey)) else: errstring = space.wrap("The object is not a PyHKEY object") raise OperationError(space.w_TypeError, errstring) @@ -631,8 +635,8 @@ null_dword, ft) if ret != 0: raiseWindowsError(space, ret, 'RegQueryInfoKey') - l = (ft[0].c_dwLowDateTime + - (ft[0].c_dwHighDateTime << 32)) + l = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) + + lltype.r_longlong(ft[0].c_dwLowDateTime)) return space.newtuple([space.wrap(nSubKeys[0]), space.wrap(nValues[0]), space.wrap(l)]) Modified: pypy/branch/jit-bounds/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/jit-bounds/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/jit-bounds/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 9 18:10:18 2010 @@ -191,7 +191,7 @@ return r ''', 28, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_factorialrec(self): self.run_source(''' @@ -202,7 +202,7 @@ return 1 ''', 0, ([5], 120), - ([20], 2432902008176640000L)) + ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' Modified: pypy/branch/jit-bounds/pypy/objspace/flow/specialcase.py ============================================================================== --- pypy/branch/jit-bounds/pypy/objspace/flow/specialcase.py (original) +++ pypy/branch/jit-bounds/pypy/objspace/flow/specialcase.py Thu Sep 9 18:10:18 2010 @@ -3,6 +3,7 @@ from pypy.interpreter.gateway import ApplevelClass from pypy.interpreter.error import OperationError from pypy.tool.cache import Cache +from pypy.rlib.rarithmetic import r_uint import py def sc_import(space, fn, args): @@ -120,6 +121,14 @@ pass return space.do_operation('simple_call', Constant(func), *args_w) +def sc_r_uint(space, r_uint, args): + args_w, kwds_w = args.unpack() + assert not kwds_w + [w_value] = args_w + if isinstance(w_value, Constant): + return Constant(r_uint(w_value.value)) + return space.do_operation('simple_call', space.wrap(r_uint), w_value) + def setup(space): # fn = pyframe.normalize_exception.get_function(space) # this is now routed through the objspace, directly. 
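
A short aside on the special case just added: as the comment registered with it in the next hunk explains, a 32-bit constant such as 0xffffffffL is a Python long on a 32-bit host and is not allowed to show up in the flow graphs at all, so RPython code spells it r_uint(0xffffffffL) and relies on sc_r_uint to fold the call into a single r_uint constant. The rzipfile and rsha changes later in this commit are exactly such call sites. A minimal standalone illustration of the arithmetic being modelled (a sketch; it assumes a PyPy checkout on sys.path and that r_uint wraps at the host word size):

    from pypy.rlib.rarithmetic import r_uint

    mask = r_uint(0xffffffffL)   # the kind of call that sc_r_uint constant-folds
    crc = ~r_uint(0) & mask      # unsigned arithmetic, wraps at the word size
    assert crc == mask           # holds on both 32- and 64-bit hosts
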
@@ -131,3 +140,7 @@ # if possible for fn in OperationName: space.specialcases[fn] = sc_operator + # special case to constant-fold r_uint(32-bit-constant) + # (normally, the 32-bit constant is a long, and is not allowed to + # show up in the flow graphs at all) + space.specialcases[r_uint] = sc_r_uint Modified: pypy/branch/jit-bounds/pypy/rlib/rmmap.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rmmap.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rmmap.py Thu Sep 9 18:10:18 2010 @@ -72,6 +72,7 @@ setattr(CConfig, name, rffi_platform.ConstantInteger(name)) from pypy.rlib.rwin32 import HANDLE, LPHANDLE + from pypy.rlib.rwin32 import NULL_HANDLE, INVALID_HANDLE_VALUE from pypy.rlib.rwin32 import DWORD, WORD, DWORD_PTR, LPDWORD from pypy.rlib.rwin32 import BOOL, LPVOID, LPCVOID, LPCSTR, SIZE_T from pypy.rlib.rwin32 import INT, LONG, PLONG @@ -183,7 +184,7 @@ ##_get_osfhandle = winexternal('_get_osfhandle', [INT], LONG) # casting from int to handle did not work, so I changed this # but it should not be so! - _get_osfhandle = winexternal('_get_osfhandle', [INT], HANDLE) + _get_osfhandle = winexternal('_get_osfhandle', [INT], rffi.INTPTR_T) GetLastError = winexternal('GetLastError', [], DWORD) VirtualAlloc = winexternal('VirtualAlloc', [rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD], @@ -228,8 +229,7 @@ def _get_error_no(): return rffi.cast(lltype.Signed, GetLastError()) - NULL_HANDLE = rffi.cast(HANDLE, 0) - INVALID_HANDLE = rffi.cast(HANDLE, -1) + INVALID_HANDLE = INVALID_HANDLE_VALUE PAGESIZE = _get_page_size() NULL = lltype.nullptr(PTR.TO) @@ -684,12 +684,11 @@ # assume -1 and 0 both mean invalid file descriptor # to 'anonymously' map memory. if fileno != -1 and fileno != 0: - fh = _get_osfhandle(fileno) - # parts of the C library use HANDLE, others just ints - # XXX hack - made _get_osfhandle compatible - if fh == INVALID_HANDLE: + res = _get_osfhandle(fileno) + if res == rffi.cast(rffi.SSIZE_T, INVALID_HANDLE): errno = _get_error_no() raise OSError(errno, os.strerror(errno)) + fh = rffi.cast(HANDLE, res) # Win9x appears to need us seeked to zero # SEEK_SET = 0 # libc._lseek(fileno, 0, SEEK_SET) Modified: pypy/branch/jit-bounds/pypy/rlib/rsha.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rsha.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rsha.py Thu Sep 9 18:10:18 2010 @@ -88,7 +88,7 @@ 0xCA62C1D6L # (60 <= t <= 79) ] -unroll_f_K = unrolling_iterable(zip(f, K)) +unroll_f_K = unrolling_iterable(zip(f, map(r_uint, K))) if UNROLL_ALL: unroll_range_20 = unrolling_iterable(range(20)) Modified: pypy/branch/jit-bounds/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rwin32.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rwin32.py Thu Sep 9 18:10:18 2010 @@ -81,9 +81,10 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='win') if WIN32: - HANDLE = rffi.ULONG + HANDLE = rffi.COpaquePtr(typedef='HANDLE') LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE + NULL_HANDLE = rffi.cast(HANDLE, 0) INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) Modified: pypy/branch/jit-bounds/pypy/rlib/rzipfile.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rlib/rzipfile.py (original) +++ pypy/branch/jit-bounds/pypy/rlib/rzipfile.py Thu Sep 9 18:10:18 2010 @@ 
-19,12 +19,12 @@ def crc32(s, crc=0): result = 0 - crc = ~r_uint(crc) & 0xffffffffL + crc = ~r_uint(crc) & r_uint(0xffffffffL) for c in s: crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) #/* Note: (crc >> 8) MUST zero fill on left - result = crc ^ 0xffffffffL + result = crc ^ r_uint(0xffffffffL) return result @@ -194,7 +194,7 @@ (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, crc, x.compress_size, x.file_size) = centdir[1:12] - x.CRC = r_uint(crc) & 0xffffffff + x.CRC = r_uint(crc) & r_uint(0xffffffff) x.dostime = t x.dosdate = d x.volume, x.internal_attr, x.external_attr = centdir[15:18] Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/rffi.py Thu Sep 9 18:10:18 2010 @@ -358,9 +358,11 @@ if os.name != 'nt': TYPES.append('mode_t') TYPES.append('pid_t') + TYPES.append('ssize_t') else: MODE_T = lltype.Signed PID_T = lltype.Signed + SSIZE_T = lltype.Signed def populate_inttypes(): names = [] @@ -415,6 +417,7 @@ # ULONGLONG r_ulonglong # WCHAR_T r_wchar_t # SIZE_T r_size_t +# SSIZE_T r_ssize_t # TIME_T r_time_t # -------------------------------------------------------------------- # Note that rffi.r_int is not necessarily the same as @@ -535,6 +538,8 @@ # (use SIGNEDCHAR or UCHAR for the small integer types) CHAR = lltype.Char +INTPTR_T = SSIZE_T + # double DOUBLE = lltype.Float Modified: pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_rffi.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_rffi.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/lltypesystem/test/test_rffi.py Thu Sep 9 18:10:18 2010 @@ -186,6 +186,11 @@ def test_externvar(self): import os + if os.name == 'nt': + # Windows CRT badly aborts when an invalid fd is used. + bad_fd = 0 + else: + bad_fd = 12312312 def f(): set_errno(12) @@ -193,7 +198,7 @@ def g(): try: - os.write(12312312, "xxx") + os.write(bad_fd, "xxx") except OSError: pass return get_errno() Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gctransform/asmgcroot.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gctransform/asmgcroot.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gctransform/asmgcroot.py Thu Sep 9 18:10:18 2010 @@ -18,6 +18,7 @@ # The .s file produced by GCC is then parsed by trackgcroot.py. 
# +IS_64_BITS = sys.maxint > 2147483647 class AsmGcRootFrameworkGCTransformer(FrameworkGCTransformer): _asmgcc_save_restore_arguments = None @@ -326,7 +327,7 @@ ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") return callee.regs_stored_at[reg] elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + 4 + esp_in_caller = callee.frame_address + sizeofaddr return esp_in_caller + offset elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] @@ -415,11 +416,12 @@ key1 = addr1.address[0] key2 = addr2.address[0] if key1 < key2: - return -1 + result = -1 elif key1 == key2: - return 0 + result = 0 else: - return 1 + result = 1 + return rffi.cast(rffi.INT, result) # ____________________________________________________________ @@ -464,9 +466,15 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # -CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers -INDEX_OF_EBP = 3 -FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array + +if IS_64_BITS: + CALLEE_SAVED_REGS = 6 + INDEX_OF_EBP = 5 + FRAME_PTR = CALLEE_SAVED_REGS +else: + CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers + INDEX_OF_EBP = 3 + FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) Modified: pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/memory/gctypelayout.py Thu Sep 9 18:10:18 2010 @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import ll_assert +from pypy.rlib.rarithmetic import intmask from pypy.tool.identity_dict import identity_dict @@ -122,8 +123,8 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 -T_KEY_MASK = 0xFF000000 -T_KEY_VALUE = 0x7A000000 # bug detection only +T_KEY_MASK = intmask(0xFF000000) +T_KEY_VALUE = intmask(0x7A000000) # bug detection only def _check_valid_type_info(p): ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id") Modified: pypy/branch/jit-bounds/pypy/rpython/module/ll_os.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/module/ll_os.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/module/ll_os.py Thu Sep 9 18:10:18 2010 @@ -1046,7 +1046,7 @@ rffi.VOIDP, rwin32.DWORD], rwin32.BOOL) - _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.ULONG, + _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.INTPTR_T, rffi.INT], rffi.INT) null = lltype.nullptr(rffi.VOIDP.TO) @@ -1059,8 +1059,8 @@ error = 0 else: error = rwin32.GetLastError() - hread = pread[0] - hwrite = pwrite[0] + hread = rffi.cast(rffi.INTPTR_T, pread[0]) + hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) lltype.free(pwrite, flavor='raw') lltype.free(pread, flavor='raw') if error: Modified: pypy/branch/jit-bounds/pypy/rpython/module/ll_os_stat.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/module/ll_os_stat.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/module/ll_os_stat.py Thu Sep 9 18:10:18 2010 @@ -437,5 +437,5 @@ def 
time_t_to_FILE_TIME(time, filetime): ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & ((1 << 32) - 1)) + filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) Modified: pypy/branch/jit-bounds/pypy/rpython/module/ll_win32file.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/module/ll_win32file.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/module/ll_win32file.py Thu Sep 9 18:10:18 2010 @@ -265,7 +265,8 @@ hFile = CreateFile(path, FILE_WRITE_ATTRIBUTES, 0, None, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, 0) + FILE_FLAG_BACKUP_SEMANTICS, + rwin32.NULL_HANDLE) if hFile == rwin32.INVALID_HANDLE_VALUE: raise rwin32.lastWindowsError() ctime = lltype.nullptr(rwin32.FILETIME) Modified: pypy/branch/jit-bounds/pypy/rpython/test/test_rint.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/test/test_rint.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/test/test_rint.py Thu Sep 9 18:10:18 2010 @@ -117,10 +117,10 @@ assert self.ll_to_string(res) == '413974738222117' def test_unsigned(self): - bigvalue = sys.maxint + 17 + bigvalue = r_uint(sys.maxint + 17) def dummy(i): i = r_uint(i) - j = r_uint(bigvalue) + j = bigvalue return i < j res = self.interpret(dummy,[0]) Modified: pypy/branch/jit-bounds/pypy/rpython/tool/rffi_platform.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/tool/rffi_platform.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/tool/rffi_platform.py Thu Sep 9 18:10:18 2010 @@ -8,6 +8,7 @@ from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import CompilationError from pypy.tool.udir import udir +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong, intmask # ____________________________________________________________ # @@ -371,7 +372,7 @@ yield '}' def build_result(self, info, config_result): - return info['value'] + return expose_value_as_rpython(info['value']) class DefinedConstantInteger(CConfigEntry): """An entry in a CConfig class that stands for an externally @@ -397,7 +398,7 @@ def build_result(self, info, config_result): if info["defined"]: - return info['value'] + return expose_value_as_rpython(info['value']) return None class DefinedConstantString(CConfigEntry): @@ -620,6 +621,20 @@ raise TypeError("conflicting field type %r for %r" % (fieldtype, fieldname)) +def expose_value_as_rpython(value): + if intmask(value) == value: + return value + if r_uint(value) == value: + return r_uint(value) + try: + if r_longlong(value) == value: + return r_longlong(value) + except OverflowError: + pass + if r_ulonglong(value) == value: + return r_ulonglong(value) + raise OverflowError("value %d does not fit into any RPython integer type" + % (value,)) C_HEADER = """ #include Modified: pypy/branch/jit-bounds/pypy/rpython/tool/test/test_rffi_platform.py ============================================================================== --- pypy/branch/jit-bounds/pypy/rpython/tool/test/test_rffi_platform.py (original) +++ pypy/branch/jit-bounds/pypy/rpython/tool/test/test_rffi_platform.py Thu Sep 9 18:10:18 2010 @@ -5,6 +5,7 @@ from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform +from 
pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong def import_ctypes(): try: @@ -357,3 +358,19 @@ padding = list(S._hints['padding']) d = {'c_c1': 'char'} assert S._hints['get_padding_drop'](d) == padding + +def test_expose_value_as_rpython(): + def get(x): + x = rffi_platform.expose_value_as_rpython(x) + return (x, type(x)) + assert get(5) == (5, int) + assert get(-82) == (-82, int) + assert get(sys.maxint) == (sys.maxint, int) + assert get(sys.maxint+1) == (sys.maxint+1, r_uint) + if sys.maxint == 2147483647: + assert get(9999999999) == (9999999999, r_longlong) + assert get(-9999999999) == (-9999999999, r_longlong) + assert get(2**63) == (2**63, r_ulonglong) + assert get(-2**63) == (-2**63, r_longlong) + py.test.raises(OverflowError, get, -2**63-1) + py.test.raises(OverflowError, get, 2**64) Modified: pypy/branch/jit-bounds/pypy/translator/c/database.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/database.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/database.py Thu Sep 9 18:10:18 2010 @@ -2,7 +2,7 @@ Primitive, Ptr, typeOf, RuntimeTypeInfo, \ Struct, Array, FuncType, PyObject, Void, \ ContainerType, OpaqueType, FixedSizeArray, _uninitialized -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant from pypy.rpython.lltypesystem import llgroup @@ -183,6 +183,12 @@ if isinstance(T, Primitive) or T == GCREF: return PrimitiveName[T](obj, self) elif isinstance(T, Ptr): + if (isinstance(T.TO, OpaqueType) and + T.TO.hints.get('c_pointer_typedef') is not None): + if obj._obj is not None: + value = rffi.cast(rffi.SSIZE_T, obj) + return '((%s) %s)' % (cdecl(self.gettype(T), ''), + self.get(value)) if obj: # test if the ptr is non-NULL try: container = obj._obj Modified: pypy/branch/jit-bounds/pypy/translator/c/gcc/instruction.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gcc/instruction.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gcc/instruction.py Thu Sep 9 18:10:18 2010 @@ -5,6 +5,14 @@ LOC_MASK = 0x03 LOC_NOWHERE = LOC_REG | 0 +# x86-32 registers sometimes used to pass arguments when gcc optimizes +# a function's calling convention +ARGUMENT_REGISTERS_32 = ('%eax', '%edx', '%ecx') + +# x86-64 registers used to pass arguments +ARGUMENT_REGISTERS_64 = ('%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9') + + def frameloc_esp(offset): assert offset >= 0 assert offset % 4 == 0 @@ -19,7 +27,8 @@ class SomeNewValue(object): - pass + def __repr__(self): + return 'somenewvalue' somenewvalue = SomeNewValue() class LocalVar(object): @@ -42,7 +51,7 @@ else: return 1 - def getlocation(self, framesize, uses_frame_pointer): + def getlocation(self, framesize, uses_frame_pointer, wordsize): if (self.hint == 'esp' or not uses_frame_pointer or self.ofs_from_frame_end % 2 != 0): # try to use esp-relative addressing @@ -52,7 +61,7 @@ # we can get an odd value if the framesize is marked as bogus # by visit_andl() assert uses_frame_pointer - ofs_from_ebp = self.ofs_from_frame_end + 4 + ofs_from_ebp = self.ofs_from_frame_end + wordsize return frameloc_ebp(ofs_from_ebp) @@ -81,22 +90,28 @@ self.previous_insns = [] # all insns that jump (or fallthrough) here class InsnFunctionStart(Insn): + _args_ = ['arguments'] framesize = 0 previous_insns = () - def 
__init__(self, registers): + def __init__(self, registers, wordsize): self.arguments = {} for reg in registers: self.arguments[reg] = somenewvalue + self.wordsize = wordsize def source_of(self, localvar, tag): if localvar not in self.arguments: - if localvar in ('%eax', '%edx', '%ecx'): + if self.wordsize == 4 and localvar in ARGUMENT_REGISTERS_32: # xxx this might show a bug in trackgcroot.py failing to # figure out which instruction stored a value in these # registers. However, this case also occurs when the # the function's calling convention was optimized by gcc: # the 3 registers above are then used to pass arguments pass + elif self.wordsize == 8 and localvar in ARGUMENT_REGISTERS_64: + # this is normal: these registers are always used to + # pass arguments + pass else: assert (isinstance(localvar, LocalVar) and localvar.ofs_from_frame_end > 0), ( @@ -218,15 +233,16 @@ return {self.loc: None} class InsnPrologue(Insn): + def __init__(self, wordsize): + self.wordsize = wordsize def __setattr__(self, attr, value): if attr == 'framesize': - assert value == 4, ("unrecognized function prologue - " - "only supports push %ebp; movl %esp, %ebp") + assert value == self.wordsize, ( + "unrecognized function prologue - " + "only supports push %ebp; movl %esp, %ebp") Insn.__setattr__(self, attr, value) class InsnEpilogue(Insn): def __init__(self, framesize=None): if framesize is not None: self.framesize = framesize - - Modified: pypy/branch/jit-bounds/pypy/translator/c/gcc/test/conftest.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gcc/test/conftest.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gcc/test/conftest.py Thu Sep 9 18:10:18 2010 @@ -1,8 +1,6 @@ import py from pypy.jit.backend import detect_cpu - cpu = detect_cpu.autodetect() def pytest_runtest_setup(item): - if cpu != 'x86': + if cpu not in ('x86', 'x86_64'): py.test.skip("x86 directory skipped: cpu is %r" % (cpu,)) - Modified: pypy/branch/jit-bounds/pypy/translator/c/gcc/test/test_trackgcroot.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gcc/test/test_trackgcroot.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gcc/test/test_trackgcroot.py Thu Sep 9 18:10:18 2010 @@ -1,51 +1,52 @@ import py import sys, re -from pypy.translator.c.gcc.trackgcroot import format_location -from pypy.translator.c.gcc.trackgcroot import format_callshape from pypy.translator.c.gcc.trackgcroot import LOC_NOWHERE, LOC_REG from pypy.translator.c.gcc.trackgcroot import LOC_EBP_PLUS, LOC_EBP_MINUS from pypy.translator.c.gcc.trackgcroot import LOC_ESP_PLUS from pypy.translator.c.gcc.trackgcroot import ElfAssemblerParser from pypy.translator.c.gcc.trackgcroot import DarwinAssemblerParser -from pypy.translator.c.gcc.trackgcroot import compress_callshape -from pypy.translator.c.gcc.trackgcroot import decompress_callshape from pypy.translator.c.gcc.trackgcroot import PARSERS +from pypy.translator.c.gcc.trackgcroot import ElfFunctionGcRootTracker32 from StringIO import StringIO +import py.test this_dir = py.path.local(__file__).dirpath() def test_format_location(): - assert format_location(LOC_NOWHERE) == '?' 
- assert format_location(LOC_REG | (1<<2)) == '%ebx' - assert format_location(LOC_REG | (2<<2)) == '%esi' - assert format_location(LOC_REG | (3<<2)) == '%edi' - assert format_location(LOC_REG | (4<<2)) == '%ebp' - assert format_location(LOC_EBP_PLUS + 0) == '(%ebp)' - assert format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' - assert format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' - assert format_location(LOC_ESP_PLUS + 0) == '(%esp)' - assert format_location(LOC_ESP_PLUS + 4) == '4(%esp)' + cls = ElfFunctionGcRootTracker32 + assert cls.format_location(LOC_NOWHERE) == '?' + assert cls.format_location(LOC_REG | (1<<2)) == '%ebx' + assert cls.format_location(LOC_REG | (2<<2)) == '%esi' + assert cls.format_location(LOC_REG | (3<<2)) == '%edi' + assert cls.format_location(LOC_REG | (4<<2)) == '%ebp' + assert cls.format_location(LOC_EBP_PLUS + 0) == '(%ebp)' + assert cls.format_location(LOC_EBP_PLUS + 4) == '4(%ebp)' + assert cls.format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)' + assert cls.format_location(LOC_ESP_PLUS + 0) == '(%esp)' + assert cls.format_location(LOC_ESP_PLUS + 4) == '4(%esp)' def test_format_callshape(): + cls = ElfFunctionGcRootTracker32 expected = ('{4(%ebp) ' # position of the return address '| 8(%ebp), 12(%ebp), 16(%ebp), 20(%ebp) ' # 4 saved regs '| 24(%ebp), 28(%ebp)}') # GC roots - assert format_callshape((LOC_EBP_PLUS+4, - LOC_EBP_PLUS+8, - LOC_EBP_PLUS+12, - LOC_EBP_PLUS+16, - LOC_EBP_PLUS+20, - LOC_EBP_PLUS+24, - LOC_EBP_PLUS+28)) == expected + assert cls.format_callshape((LOC_EBP_PLUS+4, + LOC_EBP_PLUS+8, + LOC_EBP_PLUS+12, + LOC_EBP_PLUS+16, + LOC_EBP_PLUS+20, + LOC_EBP_PLUS+24, + LOC_EBP_PLUS+28)) == expected def test_compress_callshape(): + cls = ElfFunctionGcRootTracker32 shape = (1, 127, 0x1234, 0x5678, 0x234567, 0x765432, 0x61626364, 0x41424344) - bytes = list(compress_callshape(shape)) + bytes = list(cls.compress_callshape(shape)) print bytes assert len(bytes) == 1+1+2+3+4+4+5+5+1 - assert decompress_callshape(bytes) == list(shape) + assert cls.decompress_callshape(bytes) == list(shape) def test_find_functions_elf(): source = """\ @@ -108,7 +109,7 @@ def test_computegcmaptable(): tests = [] - for format in ('elf', 'darwin', 'msvc'): + for format in ('elf', 'darwin', 'msvc', 'elf64'): for path in this_dir.join(format).listdir("track*.s"): n = path.purebasename[5:] try: @@ -138,7 +139,7 @@ tabledict = {} seen = {} for entry in table: - print '%s: %s' % (entry[0], format_callshape(entry[1])) + print '%s: %s' % (entry[0], tracker.format_callshape(entry[1])) tabledict[entry[0]] = entry[1] # find the ";; expected" lines prevline = "" @@ -151,7 +152,7 @@ label = prevmatch.group(1) assert label in tabledict got = tabledict[label] - assert format_callshape(got) == expected + assert tracker.format_callshape(got) == expected seen[label] = True if format == 'msvc': expectedlines.insert(i-2, 'PUBLIC\t%s\n' % (label,)) Modified: pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/branch/jit-bounds/pypy/translator/c/gcc/trackgcroot.py Thu Sep 9 18:10:18 2010 @@ -72,7 +72,7 @@ if self.is_stack_bottom: retaddr = LOC_NOWHERE # end marker for asmgcroot.py elif self.uses_frame_pointer: - retaddr = frameloc_ebp(4) + retaddr = frameloc_ebp(self.WORD) else: retaddr = frameloc_esp(insn.framesize) shape = [retaddr] @@ -84,7 +84,8 @@ for localvar, tag in insn.gcroots.items(): if isinstance(localvar, LocalVar): loc = 
localvar.getlocation(insn.framesize, - self.uses_frame_pointer) + self.uses_frame_pointer, + self.WORD) elif localvar in self.REG2LOC: loc = self.REG2LOC[localvar] else: @@ -148,7 +149,7 @@ lst.append(previnsn) def parse_instructions(self): - self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS)] + self.insns = [InsnFunctionStart(self.CALLEE_SAVE_REGISTERS, self.WORD)] ignore_insns = False for lineno, line in enumerate(self.lines): if lineno < self.skip: @@ -263,7 +264,7 @@ ofs_from_ebp = int(match.group(1) or '0') if self.format == 'msvc': ofs_from_ebp += int(match.group(2) or '0') - localvar = ofs_from_ebp - 4 + localvar = ofs_from_ebp - self.WORD assert localvar != 0 # that's the return address return LocalVar(localvar, hint='ebp') return localvar @@ -357,6 +358,56 @@ self.lines.insert(call.lineno+1, '\t.globl\t%s\n' % (label,)) call.global_label = label + @classmethod + def compress_callshape(cls, shape): + # For a single shape, this turns the list of integers into a list of + # bytes and reverses the order of the entries. The length is + # encoded by inserting a 0 marker after the gc roots coming from + # shape[N:] and before the N values coming from shape[N-1] to + # shape[0] (for N == 5 on 32-bit or 7 on 64-bit platforms). + # In practice it seems that shapes contain many integers + # whose value is up to a few thousands, which the algorithm below + # compresses down to 2 bytes. Very small values compress down to a + # single byte. + + # Callee-save regs plus ret addr + min_size = len(cls.CALLEE_SAVE_REGISTERS) + 1 + + assert len(shape) >= min_size + shape = list(shape) + assert 0 not in shape[min_size:] + shape.insert(min_size, 0) + result = [] + for loc in shape: + assert loc >= 0 + flag = 0 + while loc >= 0x80: + result.append(int(loc & 0x7F) | flag) + flag = 0x80 + loc >>= 7 + result.append(int(loc) | flag) + result.reverse() + return result + + @classmethod + def decompress_callshape(cls, bytes): + # For tests. This logic is copied in asmgcroot.py. 
+ result = [] + n = 0 + while n < len(bytes): + value = 0 + while True: + b = bytes[n] + n += 1 + value += b + if b < 0x80: + break + value = (value - 0x80) << 7 + result.append(value) + result.reverse() + assert result[5] == 0 + del result[5] + return result # ____________________________________________________________ CANNOT_COLLECT = { # some of the most used functions that cannot collect @@ -385,10 +436,9 @@ 'inc', 'dec', 'not', 'neg', 'or', 'and', 'sbb', 'adc', 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', + 'punpck', 'pshufd', # zero-extending moves should not produce GC pointers 'movz', - # quadword operations - 'movq', ]) visit_movb = visit_nop @@ -400,7 +450,7 @@ visit_xorb = visit_nop visit_xorw = visit_nop - def visit_addl(self, line, sign=+1): + def _visit_add(self, line, sign=+1): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -415,8 +465,8 @@ else: return [] - def visit_subl(self, line): - return self.visit_addl(line, sign=-1) + def _visit_sub(self, line): + return self._visit_add(line, sign=-1) def unary_insn(self, line): match = self.r_unaryinsn.match(line) @@ -439,8 +489,6 @@ else: return [] - visit_xorl = binary_insn # used in "xor reg, reg" to create a NULL GC ptr - visit_orl = binary_insn # The various cmov* operations for name in ''' e ne g ge l le a ae b be p np s ns o no @@ -448,7 +496,7 @@ locals()['visit_cmov' + name] = binary_insn locals()['visit_cmov' + name + 'l'] = binary_insn - def visit_andl(self, line): + def _visit_and(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -460,9 +508,7 @@ else: return self.binary_insn(line) - visit_and = visit_andl - - def visit_leal(self, line): + def _visit_lea(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") if target == self.ESP: @@ -474,7 +520,7 @@ raise UnrecognizedOperation('epilogue without prologue') ofs_from_ebp = int(match.group(1) or '0') assert ofs_from_ebp <= 0 - framesize = 4 - ofs_from_ebp + framesize = self.WORD - ofs_from_ebp else: match = self.r_localvar_esp.match(source) # leal 12(%esp), %esp @@ -489,17 +535,23 @@ def insns_for_copy(self, source, target): source = self.replace_symbols(source) target = self.replace_symbols(target) - if source == self.ESP or target == self.ESP: + if target == self.ESP: raise UnrecognizedOperation('%s -> %s' % (source, target)) elif self.r_localvar.match(target): if self.r_localvar.match(source): + # eg, movl %eax, %ecx: possibly copies a GC root return [InsnCopyLocal(source, target)] else: + # eg, movl (%eax), %edi or mov %esp, %edi: load a register + # from "outside". If it contains a pointer to a GC root, + # it will be announced later with the GCROOT macro. return [InsnSetLocal(target, [source])] else: + # eg, movl %ebx, (%edx) or mov %ebp, %esp: does not write into + # a general register return [] - def visit_movl(self, line): + def _visit_mov(self, line): match = self.r_binaryinsn.match(line) source = match.group("source") target = match.group("target") @@ -513,34 +565,24 @@ # gcc -fno-unit-at-a-time. 
return self.insns_for_copy(source, target) - visit_mov = visit_movl - - def visit_pushl(self, line): + def _visit_push(self, line): match = self.r_unaryinsn.match(line) source = match.group(1) - return [InsnStackAdjust(-4)] + self.insns_for_copy(source, self.TOP_OF_STACK) - - def visit_pushw(self, line): - return [InsnStackAdjust(-2)] # rare but not impossible + return [InsnStackAdjust(-self.WORD)] + self.insns_for_copy(source, self.TOP_OF_STACK) def _visit_pop(self, target): - return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+4)] - - def visit_popl(self, line): - match = self.r_unaryinsn.match(line) - target = match.group(1) - return self._visit_pop(target) + return self.insns_for_copy(self.TOP_OF_STACK, target) + [InsnStackAdjust(+self.WORD)] def _visit_prologue(self): # for the prologue of functions that use %ebp as frame pointer self.uses_frame_pointer = True self.r_localvar = self.r_localvarfp - return [InsnPrologue()] + return [InsnPrologue(self.WORD)] def _visit_epilogue(self): if not self.uses_frame_pointer: raise UnrecognizedOperation('epilogue without prologue') - return [InsnEpilogue(4)] + return [InsnEpilogue(self.WORD)] def visit_leave(self, line): return self._visit_epilogue() + self._visit_pop(self.EBP) @@ -662,7 +704,7 @@ visit_jc = conditional_jump visit_jnc = conditional_jump - def visit_xchgl(self, line): + def _visit_xchg(self, line): # only support the format used in VALGRIND_DISCARD_TRANSLATIONS # which is to use a marker no-op "xchgl %ebx, %ebx" match = self.r_binaryinsn.match(line) @@ -741,8 +783,172 @@ insns.append(InsnStackAdjust(16)) return insns + # __________ debugging output __________ + + @classmethod + def format_location(cls, loc): + # A 'location' is a single number describing where a value is stored + # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or + # in the stack frame at an address relative to either %esp or %ebp. + # The last two bits of the location number are used to tell the cases + # apart; see format_location(). + assert loc >= 0 + kind = loc & LOC_MASK + if kind == LOC_REG: + if loc == LOC_NOWHERE: + return '?' + reg = (loc >> 2) - 1 + return '%' + cls.CALLEE_SAVE_REGISTERS[reg].replace("%", "") + else: + offset = loc & ~ LOC_MASK + if kind == LOC_EBP_PLUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + elif kind == LOC_EBP_MINUS: + result = '(%' + cls.EBP.replace("%", "") + ')' + offset = -offset + elif kind == LOC_ESP_PLUS: + result = '(%' + cls.ESP.replace("%", "") + ')' + else: + assert 0, kind + if offset != 0: + result = str(offset) + result + return result + + @classmethod + def format_callshape(cls, shape): + # A 'call shape' is a tuple of locations in the sense of + # format_location(). They describe where in a function frame + # interesting values are stored, when this function executes a 'call' + # instruction. + # + # shape[0] is the location that stores the fn's own return + # address (not the return address for the currently + # executing 'call') + # + # shape[1..N] is where the fn saved its own caller's value of a + # certain callee save register. (where N is the number + # of callee save registers.) 
+ # + # shape[>N] are GC roots: where the fn has put its local GCPTR + # vars + # + num_callee_save_regs = len(cls.CALLEE_SAVE_REGISTERS) + assert isinstance(shape, tuple) + # + 1 for the return address + assert len(shape) >= (num_callee_save_regs + 1) + result = [cls.format_location(loc) for loc in shape] + return '{%s | %s | %s}' % (result[0], + ', '.join(result[1:(num_callee_save_regs+1)]), + ', '.join(result[(num_callee_save_regs+1):])) + + +class FunctionGcRootTracker32(FunctionGcRootTracker): + WORD = 4 + + visit_mov = FunctionGcRootTracker._visit_mov + visit_movl = FunctionGcRootTracker._visit_mov + visit_pushl = FunctionGcRootTracker._visit_push + visit_leal = FunctionGcRootTracker._visit_lea + + visit_addl = FunctionGcRootTracker._visit_add + visit_subl = FunctionGcRootTracker._visit_sub + visit_andl = FunctionGcRootTracker._visit_and + visit_and = FunctionGcRootTracker._visit_and + + visit_xchgl = FunctionGcRootTracker._visit_xchg + + # used in "xor reg, reg" to create a NULL GC ptr + visit_xorl = FunctionGcRootTracker.binary_insn + visit_orl = FunctionGcRootTracker.binary_insn # unsure about this one + + # occasionally used on 32-bits to move floats around + visit_movq = FunctionGcRootTracker.visit_nop + + def visit_pushw(self, line): + return [InsnStackAdjust(-2)] # rare but not impossible -class ElfFunctionGcRootTracker(FunctionGcRootTracker): + def visit_popl(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + +class FunctionGcRootTracker64(FunctionGcRootTracker): + WORD = 8 + + # Regex ignores destination + r_save_xmm_register = re.compile(r"\tmovaps\s+%xmm(\d+)") + + def _maybe_32bit_dest(func): + def wrapper(self, line): + # Using a 32-bit reg as a destination in 64-bit mode zero-extends + # to 64-bits, so sometimes gcc uses a 32-bit operation to copy a + # statically known pointer to a register + + # %eax -> %rax + new_line = re.sub(r"%e(ax|bx|cx|dx|di|si)$", r"%r\1", line) + # %r10d -> %r10 + new_line = re.sub(r"%r(\d+)d$", r"%r\1", new_line) + return func(self, new_line) + return wrapper + + visit_addl = FunctionGcRootTracker.visit_nop + visit_subl = FunctionGcRootTracker.visit_nop + visit_leal = FunctionGcRootTracker.visit_nop + + visit_cltq = FunctionGcRootTracker.visit_nop + + visit_movq = FunctionGcRootTracker._visit_mov + # just a special assembler mnemonic for mov + visit_movabsq = FunctionGcRootTracker._visit_mov + visit_mov = _maybe_32bit_dest(FunctionGcRootTracker._visit_mov) + visit_movl = visit_mov + + visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) + + visit_pushq = FunctionGcRootTracker._visit_push + + visit_addq = FunctionGcRootTracker._visit_add + visit_subq = FunctionGcRootTracker._visit_sub + + visit_leaq = FunctionGcRootTracker._visit_lea + + visit_xorq = FunctionGcRootTracker.binary_insn + + # FIXME: similar to visit_popl for 32-bit + def visit_popq(self, line): + match = self.r_unaryinsn.match(line) + target = match.group(1) + return self._visit_pop(target) + + def visit_jmp(self, line): + # On 64-bit, %al is used when calling varargs functions to specify an + # upper-bound on the number of xmm registers used in the call. 
gcc + # uses %al to compute an indirect jump that looks like: + # + # jmp *[some register] + # movaps %xmm7, [stack location] + # movaps %xmm6, [stack location] + # movaps %xmm5, [stack location] + # movaps %xmm4, [stack location] + # movaps %xmm3, [stack location] + # movaps %xmm2, [stack location] + # movaps %xmm1, [stack location] + # movaps %xmm0, [stack location] + # + # The jmp is always to somewhere in the block of "movaps" + # instructions, according to how many xmm registers need to be saved + # to the stack. The point of all this is that we can safely ignore + # jmp instructions of that form. + if (self.currentlineno + 8) < len(self.lines) and self.r_unaryinsn_star.match(line): + matches = [self.r_save_xmm_register.match(self.lines[self.currentlineno + 1 + i]) for i in range(8)] + if all(m and int(m.group(1)) == (7 - i) for i, m in enumerate(matches)): + return [] + + return FunctionGcRootTracker.visit_jmp(self, line) + + + +class ElfFunctionGcRootTracker32(FunctionGcRootTracker32): format = 'elf' ESP = '%esp' @@ -791,7 +997,65 @@ match = self.r_functionend.match(lines[-1]) assert funcname == match.group(1) assert funcname == match.group(2) - super(ElfFunctionGcRootTracker, self).__init__( + super(ElfFunctionGcRootTracker32, self).__init__( + funcname, lines, filetag) + + def extract_immediate(self, value): + if not value.startswith('$'): + return None + return int(value[1:]) + +ElfFunctionGcRootTracker32.init_regexp() + +class ElfFunctionGcRootTracker64(FunctionGcRootTracker64): + format = 'elf64' + ESP = '%rsp' + EBP = '%rbp' + EAX = '%rax' + CALLEE_SAVE_REGISTERS = ['%rbx', '%r12', '%r13', '%r14', '%r15', '%rbp'] + REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) + for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) + OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + OFFSET_LABELS = 2**30 + TOP_OF_STACK = '0(%rsp)' + + r_functionstart = re.compile(r"\t.type\s+"+LABEL+",\s*[@]function\s*$") + r_functionend = re.compile(r"\t.size\s+"+LABEL+",\s*[.]-"+LABEL+"\s*$") + LOCALVAR = r"%rax|%rbx|%rcx|%rdx|%rdi|%rsi|%rbp|%r8|%r9|%r10|%r11|%r12|%r13|%r14|%r15|-?\d*[(]%rsp[)]" + LOCALVARFP = LOCALVAR + r"|-?\d*[(]%rbp[)]" + r_localvarnofp = re.compile(LOCALVAR) + r_localvarfp = re.compile(LOCALVARFP) + r_localvar_esp = re.compile(r"(-?\d*)[(]%rsp[)]") + r_localvar_ebp = re.compile(r"(-?\d*)[(]%rbp[)]") + + r_rel_label = re.compile(r"(\d+):\s*$") + r_jump_rel_label = re.compile(r"\tj\w+\s+"+"(\d+)f"+"\s*$") + + r_unaryinsn_star= re.compile(r"\t[a-z]\w*\s+[*]("+OPERAND+")\s*$") + r_jmptable_item = re.compile(r"\t.quad\t"+LABEL+"(-\"[A-Za-z0-9$]+\")?\s*$") + r_jmptable_end = re.compile(r"\t.text|\t.section\s+.text|\t\.align|"+LABEL) + + r_gcroot_marker = re.compile(r"\t/[*] GCROOT ("+LOCALVARFP+") [*]/") + r_gcnocollect_marker = re.compile(r"\t/[*] GC_NOCOLLECT ("+OPERAND+") [*]/") + r_bottom_marker = re.compile(r"\t/[*] GC_STACK_BOTTOM [*]/") + + FUNCTIONS_NOT_RETURNING = { + 'abort': None, + '_exit': None, + '__assert_fail': None, + '___assert_rtn': None, + 'L___assert_rtn$stub': None, + 'L___eprintf$stub': None, + } + + def __init__(self, lines, filetag=0): + match = self.r_functionstart.match(lines[0]) + funcname = match.group(1) + match = self.r_functionend.match(lines[-1]) + assert funcname == match.group(1) + assert funcname == match.group(2) + super(ElfFunctionGcRootTracker64, self).__init__( funcname, lines, filetag) def extract_immediate(self, value): @@ -799,9 +1063,9 @@ return None return int(value[1:]) 
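
An aside on the location encoding shared by the 32- and 64-bit trackers above: a "location" is a small integer whose two low bits select the kind (callee-saved register, or an offset relative to the stack or frame pointer) and whose remaining bits carry either a 1-based register index (see REG2LOC) or the stack offset. Below is a self-contained sketch of the decoding performed by the format_location() classmethod earlier in this diff; the numeric LOC_* values are assumptions consistent with the assertions in test_trackgcroot.py, not quoted from the patch:

    LOC_REG, LOC_ESP_PLUS, LOC_EBP_PLUS, LOC_EBP_MINUS = 0, 1, 2, 3
    LOC_MASK = 0x03
    LOC_NOWHERE = LOC_REG | 0

    CALLEE_SAVE_32 = ['%ebx', '%esi', '%edi', '%ebp']
    CALLEE_SAVE_64 = ['%rbx', '%r12', '%r13', '%r14', '%r15', '%rbp']

    def format_location(loc, callee_save=CALLEE_SAVE_32, sp='%esp', fp='%ebp'):
        kind = loc & LOC_MASK
        if kind == LOC_REG:
            if loc == LOC_NOWHERE:
                return '?'
            return callee_save[(loc >> 2) - 1]          # 1-based register index
        offset = loc & ~LOC_MASK
        if kind == LOC_EBP_MINUS:
            offset = -offset
        base = '(%s)' % (sp if kind == LOC_ESP_PLUS else fp)
        return (str(offset) if offset else '') + base

    assert format_location(LOC_NOWHERE) == '?'
    assert format_location(LOC_REG | (1 << 2)) == '%ebx'
    assert format_location(LOC_EBP_PLUS + 4) == '4(%ebp)'
    assert format_location(LOC_EBP_MINUS + 4) == '-4(%ebp)'
    assert format_location(LOC_REG | (2 << 2), CALLEE_SAVE_64, '%rsp', '%rbp') == '%r12'
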
-ElfFunctionGcRootTracker.init_regexp() +ElfFunctionGcRootTracker64.init_regexp() -class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker): +class DarwinFunctionGcRootTracker(ElfFunctionGcRootTracker32): format = 'darwin' r_functionstart = re.compile(r"_(\w+):\s*$") @@ -810,7 +1074,7 @@ def __init__(self, lines, filetag=0): match = self.r_functionstart.match(lines[0]) funcname = '_' + match.group(1) - FunctionGcRootTracker.__init__(self, funcname, lines, filetag) + FunctionGcRootTracker32.__init__(self, funcname, lines, filetag) class Mingw32FunctionGcRootTracker(DarwinFunctionGcRootTracker): format = 'mingw32' @@ -821,7 +1085,7 @@ '__assert': None, } -class MsvcFunctionGcRootTracker(FunctionGcRootTracker): +class MsvcFunctionGcRootTracker(FunctionGcRootTracker32): format = 'msvc' ESP = 'esp' EBP = 'ebp' @@ -906,12 +1170,12 @@ push pop mov lea xor sub add '''.split(): - locals()['visit_' + name] = getattr(FunctionGcRootTracker, + locals()['visit_' + name] = getattr(FunctionGcRootTracker32, 'visit_' + name + 'l') - visit_int = FunctionGcRootTracker.visit_nop + visit_int = FunctionGcRootTracker32.visit_nop # probably not GC pointers - visit_cdq = FunctionGcRootTracker.visit_nop + visit_cdq = FunctionGcRootTracker32.visit_nop def visit_npad(self, line): # MASM has a nasty bug: it implements "npad 5" with "add eax, 0" @@ -1038,7 +1302,7 @@ table = tracker.computegcmaptable(self.verbose) if self.verbose > 1: for label, state in table: - print >> sys.stderr, label, '\t', format_callshape(state) + print >> sys.stderr, label, '\t', tracker.format_callshape(state) table = compress_gcmaptable(table) if self.shuffle and random.random() < 0.5: self.gcmaptable[:0] = table @@ -1049,7 +1313,7 @@ class ElfAssemblerParser(AssemblerParser): format = "elf" - FunctionGcRootTracker = ElfFunctionGcRootTracker + FunctionGcRootTracker = ElfFunctionGcRootTracker32 def find_functions(self, iterlines): functionlines = [] @@ -1072,6 +1336,10 @@ "missed the end of the previous function") yield False, functionlines +class ElfAssemblerParser64(ElfAssemblerParser): + format = "elf64" + FunctionGcRootTracker = ElfFunctionGcRootTracker64 + class DarwinAssemblerParser(AssemblerParser): format = "darwin" FunctionGcRootTracker = DarwinFunctionGcRootTracker @@ -1241,6 +1509,7 @@ PARSERS = { 'elf': ElfAssemblerParser, + 'elf64': ElfAssemblerParser64, 'darwin': DarwinAssemblerParser, 'mingw32': Mingw32AssemblerParser, 'msvc': MsvcAssemblerParser, @@ -1281,6 +1550,13 @@ txt = kwargs[self.format] print >> output, "\t%s" % txt + if self.format == 'elf64': + word_decl = '.quad' + else: + word_decl = '.long' + + tracker_cls = PARSERS[self.format].FunctionGcRootTracker + # The pypy_asm_stackwalk() function if self.format == 'msvc': @@ -1327,7 +1603,56 @@ } } """ + elif self.format == 'elf64': + print >> output, "\t.text" + print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') + print >> output, "\t.type pypy_asm_stackwalk, @function" + print >> output, "%s:" % _globalname('pypy_asm_stackwalk') + + print >> output, """\ + /* See description in asmgcroot.py */ + movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ + movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ + movq\t%rsp, %rax\t/* my frame top address */ + pushq\t%rax\t\t/* ASM_FRAMEDATA[8] */ + pushq\t%rbp\t\t/* ASM_FRAMEDATA[7] */ + pushq\t%r15\t\t/* ASM_FRAMEDATA[6] */ + pushq\t%r14\t\t/* ASM_FRAMEDATA[5] */ + pushq\t%r13\t\t/* ASM_FRAMEDATA[4] */ + pushq\t%r12\t\t/* ASM_FRAMEDATA[3] */ + pushq\t%rbx\t\t/* ASM_FRAMEDATA[2] */ + + /* Add this 
ASM_FRAMEDATA to the front of the circular linked */ + /* list. Let's call it 'self'. */ + movq\t8(%rcx), %rax\t/* next = gcrootanchor->next */ + pushq\t%rax\t\t\t\t/* self->next = next */ + pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ + movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ + movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ + + /* note: the Mac OS X 16 bytes aligment must be respected. */ + call\t*%rdx\t\t/* invoke the callback */ + + /* Detach this ASM_FRAMEDATA from the circular linked list */ + popq\t%rsi\t\t/* prev = self->prev */ + popq\t%rdi\t\t/* next = self->next */ + movq\t%rdi, 8(%rsi)\t/* prev->next = next */ + movq\t%rsi, 0(%rdi)\t/* next->prev = prev */ + + popq\t%rbx\t\t/* restore from ASM_FRAMEDATA[2] */ + popq\t%r12\t\t/* restore from ASM_FRAMEDATA[3] */ + popq\t%r13\t\t/* restore from ASM_FRAMEDATA[4] */ + popq\t%r14\t\t/* restore from ASM_FRAMEDATA[5] */ + popq\t%r15\t\t/* restore from ASM_FRAMEDATA[6] */ + popq\t%rbp\t\t/* restore from ASM_FRAMEDATA[7] */ + popq\t%rcx\t\t/* ignored ASM_FRAMEDATA[8] */ + + /* the return value is the one of the 'call' above, */ + /* because %rax (and possibly %rdx) are unmodified */ + ret + .size pypy_asm_stackwalk, .-pypy_asm_stackwalk + """ else: print >> output, "\t.text" print >> output, "\t.globl %s" % _globalname('pypy_asm_stackwalk') @@ -1401,7 +1726,7 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t%s,\t/* %s */\n' % ( ', '.join(bytes), shapeofs)) @@ -1433,17 +1758,18 @@ n = shapes[state] except KeyError: n = shapes[state] = shapeofs - bytes = [str(b) for b in compress_callshape(state)] + bytes = [str(b) for b in tracker_cls.compress_callshape(state)] shapelines.append('\t/*%d*/\t.byte\t%s\n' % ( shapeofs, ', '.join(bytes))) shapeofs += len(bytes) if is_range: n = ~ n - print >> output, '\t.long\t%s-%d' % ( + print >> output, '\t%s\t%s-%d' % ( + word_decl, label, - PARSERS[self.format].FunctionGcRootTracker.OFFSET_LABELS) - print >> output, '\t.long\t%d' % (n,) + tracker_cls.OFFSET_LABELS) + print >> output, '\t%s\t%d' % (word_decl, n) print >> output, """\ .globl __gcmapend @@ -1451,6 +1777,7 @@ """.replace("__gcmapend", _globalname("__gcmapend")) _variant(elf='.section\t.rodata', + elf64='.section\t.rodata', darwin='.const', mingw32='') @@ -1483,56 +1810,6 @@ pass -# __________ debugging output __________ - -def format_location(loc): - # A 'location' is a single number describing where a value is stored - # across a call. It can be in one of the CALLEE_SAVE_REGISTERS, or - # in the stack frame at an address relative to either %esp or %ebp. - # The last two bits of the location number are used to tell the cases - # apart; see format_location(). - assert loc >= 0 - kind = loc & LOC_MASK - if kind == LOC_REG: - if loc == LOC_NOWHERE: - return '?' - reg = (loc >> 2) - 1 - return ElfFunctionGcRootTracker.CALLEE_SAVE_REGISTERS[reg] - else: - offset = loc & ~ LOC_MASK - if kind == LOC_EBP_PLUS: - result = '(%ebp)' - elif kind == LOC_EBP_MINUS: - result = '(%ebp)' - offset = -offset - elif kind == LOC_ESP_PLUS: - result = '(%esp)' - else: - assert 0, kind - if offset != 0: - result = str(offset) + result - return result - -def format_callshape(shape): - # A 'call shape' is a tuple of locations in the sense of format_location(). - # They describe where in a function frame interesting values are stored, - # when this function executes a 'call' instruction. 
- # - # shape[0] is the location that stores the fn's own return address - # (not the return address for the currently executing 'call') - # shape[1] is where the fn saved its own caller's %ebx value - # shape[2] is where the fn saved its own caller's %esi value - # shape[3] is where the fn saved its own caller's %edi value - # shape[4] is where the fn saved its own caller's %ebp value - # shape[>=5] are GC roots: where the fn has put its local GCPTR vars - # - assert isinstance(shape, tuple) - assert len(shape) >= 5 - result = [format_location(loc) for loc in shape] - return '{%s | %s | %s}' % (result[0], - ', '.join(result[1:5]), - ', '.join(result[5:])) - # __________ table compression __________ def compress_gcmaptable(table): @@ -1559,49 +1836,6 @@ yield (label1, state, is_range) i = j -def compress_callshape(shape): - # For a single shape, this turns the list of integers into a list of - # bytes and reverses the order of the entries. The length is - # encoded by inserting a 0 marker after the gc roots coming from - # shape[5:] and before the 5 values coming from shape[4] to - # shape[0]. In practice it seems that shapes contain many integers - # whose value is up to a few thousands, which the algorithm below - # compresses down to 2 bytes. Very small values compress down to a - # single byte. - assert len(shape) >= 5 - shape = list(shape) - assert 0 not in shape[5:] - shape.insert(5, 0) - result = [] - for loc in shape: - assert loc >= 0 - flag = 0 - while loc >= 0x80: - result.append(int(loc & 0x7F) | flag) - flag = 0x80 - loc >>= 7 - result.append(int(loc) | flag) - result.reverse() - return result - -def decompress_callshape(bytes): - # For tests. This logic is copied in asmgcroot.py. - result = [] - n = 0 - while n < len(bytes): - value = 0 - while True: - b = bytes[n] - n += 1 - value += b - if b < 0x80: - break - value = (value - 0x80) << 7 - result.append(value) - result.reverse() - assert result[5] == 0 - del result[5] - return result def getidentifier(s): def mapchar(c): @@ -1626,7 +1860,10 @@ elif sys.platform == 'win32': format = 'mingw32' else: - format = 'elf' + if sys.maxint > 2147483647: + format = 'elf64' + else: + format = 'elf' entrypoint = 'main' while len(sys.argv) > 1: if sys.argv[1] == '-v': Modified: pypy/branch/jit-bounds/pypy/translator/platform/linux.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/platform/linux.py (original) +++ pypy/branch/jit-bounds/pypy/translator/platform/linux.py Thu Sep 9 18:10:18 2010 @@ -3,7 +3,7 @@ from pypy.translator.platform import _run_subprocess from pypy.translator.platform.posix import BasePosix -class Linux(BasePosix): +class BaseLinux(BasePosix): name = "linux" link_flags = ('-pthread', '-lrt') @@ -25,10 +25,12 @@ return self._pkg_config("libffi", "--libs-only-L", ['/usr/lib/libffi']) + +class Linux(BaseLinux): def library_dirs_for_libffi_a(self): # places where we need to look for libffi.a return self.library_dirs_for_libffi() + ['/usr/lib'] -class Linux64(Linux): - shared_only = ('-fPIC',) +class Linux64(BaseLinux): + pass Modified: pypy/branch/jit-bounds/pypy/translator/platform/posix.py ============================================================================== --- pypy/branch/jit-bounds/pypy/translator/platform/posix.py (original) +++ pypy/branch/jit-bounds/pypy/translator/platform/posix.py Thu Sep 9 18:10:18 2010 @@ -4,7 +4,7 @@ from pypy.translator.platform import Platform, log, _run_subprocess from pypy.tool import autopath -import py, 
os +import py, os, sys class BasePosix(Platform): exe_ext = '' @@ -104,6 +104,13 @@ else: target_name = exe_name.basename + cflags = self.cflags + if sys.maxint > 2147483647: # XXX XXX XXX sort this out + if shared: + cflags = self.cflags + self.shared_only + else: + cflags = self.cflags + self.standalone_only + m = GnuMakefile(path) m.exe_name = exe_name m.eci = eci @@ -132,7 +139,7 @@ ('LIBS', self._libs(eci.libraries)), ('LIBDIRS', self._libdirs(eci.library_dirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), - ('CFLAGS', self.cflags), + ('CFLAGS', cflags), ('CFLAGSEXTRA', list(eci.compile_extra)), ('LDFLAGS', linkflags), ('LDFLAGSEXTRA', list(eci.link_extra)), From hakanardo at codespeak.net Thu Sep 9 18:23:44 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 9 Sep 2010 18:23:44 +0200 (CEST) Subject: [pypy-svn] r76978 - in pypy/trunk: . pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/module/array/benchmark pypy/module/array/test pypy/module/pypyjit/test pypy/rlib/rsre pypy/rlib/rsre/test Message-ID: <20100909162344.23EB7282B9C@codespeak.net> Author: hakanardo Date: Thu Sep 9 18:23:42 2010 New Revision: 76978 Added: pypy/trunk/pypy/jit/metainterp/optimizeopt/ - copied from r76976, pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/ pypy/trunk/pypy/jit/metainterp/test/test_intbound.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_intbound.py pypy/trunk/pypy/module/pypyjit/test/randomized.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/module/pypyjit/test/randomized.py pypy/trunk/pypy/rlib/rsre/ (props changed) - copied from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/ pypy/trunk/pypy/rlib/rsre/__init__.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/__init__.py pypy/trunk/pypy/rlib/rsre/rsre_char.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/rsre_char.py pypy/trunk/pypy/rlib/rsre/rsre_core.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/rsre_core.py pypy/trunk/pypy/rlib/rsre/rsre_re.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/rsre_re.py pypy/trunk/pypy/rlib/rsre/test/ (props changed) - copied from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/ pypy/trunk/pypy/rlib/rsre/test/__init__.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/__init__.py pypy/trunk/pypy/rlib/rsre/test/re_tests.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/re_tests.py pypy/trunk/pypy/rlib/rsre/test/targetrsre.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/targetrsre.py pypy/trunk/pypy/rlib/rsre/test/test_char.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_char.py pypy/trunk/pypy/rlib/rsre/test/test_match.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_match.py pypy/trunk/pypy/rlib/rsre/test/test_re.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_re.py pypy/trunk/pypy/rlib/rsre/test/test_search.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_search.py pypy/trunk/pypy/rlib/rsre/test/test_zexternal.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_zexternal.py pypy/trunk/pypy/rlib/rsre/test/test_zinterp.py - copied unchanged from r76976, pypy/branch/jit-bounds/pypy/rlib/rsre/test/test_zinterp.py Removed: 
pypy/trunk/pypy/jit/metainterp/optimizeopt.py Modified: pypy/trunk/ (props changed) pypy/trunk/pypy/jit/metainterp/test/test_basic.py pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py pypy/trunk/pypy/jit/metainterp/test/test_resume.py pypy/trunk/pypy/module/array/benchmark/Makefile (props changed) pypy/trunk/pypy/module/array/benchmark/intimg.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/trunk/pypy/module/array/benchmark/loop.c (props changed) pypy/trunk/pypy/module/array/benchmark/sum.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.py (props changed) pypy/trunk/pypy/module/array/test/test_array_old.py (props changed) pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: Merged jit-bounds, which splits jit/metainterp/optimizeopt into separate optimizations steps and adds one additional step keeping bounds on integers and removes guards when they are not needed. Modified: pypy/trunk/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_basic.py Thu Sep 9 18:23:42 2010 @@ -455,6 +455,31 @@ # the CALL_PURE is constant-folded away by optimizeopt.py self.check_loops(int_sub=1, call=0, call_pure=0) + def test_pure_function_returning_object(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + class V: + def __init__(self, x): + self.x = x + v1 = V(1) + v2 = V(2) + def externfn(x): + if x: + return v1 + else: + return v2 + externfn._pure_function_ = True + def f(n, m): + while n > 0: + myjitdriver.can_enter_jit(n=n, m=m) + myjitdriver.jit_merge_point(n=n, m=m) + m = V(m).x + n -= externfn(m).x + externfn(m + m - m).x + return n + res = self.meta_interp(f, [21, 5]) + assert res == -1 + # the CALL_PURE is constant-folded away by optimizeopt.py + self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=1) + def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) class X(object): Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 9 18:23:42 2010 @@ -4,7 +4,8 @@ #OOtypeMixin, BaseTest) from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder -from pypy.jit.metainterp import optimizeopt +import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt +import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1 from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt @@ -64,7 +65,7 @@ class cpu(object): pass opt = FakeOptimizer() - virt1 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt1 = virtualize.AbstractVirtualStructValue(opt, None) lst1 = virt1._get_field_descr_list() assert lst1 == [] lst2 = virt1._get_field_descr_list() @@ -75,7 +76,7 @@ lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - virt2 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 
virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None)) @@ -88,7 +89,7 @@ self.fieldnums = fieldnums def equals(self, fieldnums): return self.fieldnums == fieldnums - class FakeVirtualValue(optimizeopt.AbstractVirtualValue): + class FakeVirtualValue(virtualize.AbstractVirtualValue): def _make_virtual(self, *args): return FakeVInfo() v1 = FakeVirtualValue(None, None, None) @@ -257,6 +258,7 @@ optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) + print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) def test_simple(self): @@ -266,7 +268,13 @@ guard_value(i0, 0) [i0] jump(i) """ - self.optimize_loop(ops, 'Not', ops) + expected = """ + [i] + i0 = int_sub(i, 1) + guard_value(i0, 0) [i0] + jump(1) + """ + self.optimize_loop(ops, 'Not', expected) def test_constant_propagate(self): ops = """ @@ -680,7 +688,13 @@ guard_value(i1, 0) [i] jump(i) """ - self.optimize_loop(ops, 'Not', ops) + expected = """ + [i] + i1 = int_add(i, 3) + guard_value(i1, 0) [i] + jump(-3) + """ + self.optimize_loop(ops, 'Not', expected) def test_int_is_true_of_bool(self): ops = """ @@ -3089,6 +3103,724 @@ ''' self.optimize_loop(ops, 'Not', expected) + def test_bound_lt(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_noguard(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + i2 = int_lt(i0, 5) + jump(i2) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + i2 = int_lt(i0, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_noopt(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(4) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_rev(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_gt(i0, 3) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_tripple(self): + ops = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 7) + guard_true(i2) [] + i3 = int_lt(i0, 5) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_before(self): + ops = """ + [i0] + i2 = int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + i1 = int_lt(i0, 6) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_ovf(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 
= int_add(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_ovf_before(self): + ops = """ + [i0] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + i1 = int_lt(i0, 6) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_sub(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_sub(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_sub_before(self): + ops = """ + [i0] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + i1 = int_lt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ltle(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_le(i0, 3) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lelt(self): + ops = """ + [i0] + i1 = int_le(i0, 4) + guard_true(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_le(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gt(self): + ops = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + i2 = int_gt(i0, 4) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gtge(self): + ops = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + i2 = int_ge(i0, 6) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gegt(self): + ops = """ + [i0] + i1 = int_ge(i0, 5) + guard_true(i1) [] + i2 = int_gt(i0, 4) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_ge(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ovf(self): + ops = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i3 = int_add_ovf(i0, 1) + guard_no_overflow() [] + jump(i3) + """ + expected = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i3 = int_add(i0, 1) + jump(i3) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_addsub_const(self): + ops = """ + [i0] + i1 = int_add(i0, 1) + i2 = int_sub(i1, 1) + i3 = int_add(i2, 1) + i4 = int_mul(i2, i3) + jump(i4) + """ + expected = """ + [i0] + i1 = int_add(i0, 1) + i4 = int_mul(i0, i1) + jump(i4) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_addsub_int(self): + ops = """ + [i0, i10] + i1 = int_add(i0, i10) + i2 = int_sub(i1, i10) + i3 = int_add(i2, i10) + i4 = int_add(i2, i3) + jump(i4, i10) + """ + expected = """ + [i0, i10] + i1 = int_add(i0, i10) + i4 = int_add(i0, i1) + jump(i4, i10) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_addsub_int2(self): + ops = """ + [i0, i10] + i1 = 
int_add(i10, i0) + i2 = int_sub(i1, i10) + i3 = int_add(i10, i2) + i4 = int_add(i2, i3) + jump(i4, i10) + """ + expected = """ + [i0, i10] + i1 = int_add(i10, i0) + i4 = int_add(i0, i1) + jump(i4, i10) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_framestackdepth_overhead(self): + ops = """ + [p0, i22] + i1 = getfield_gc(p0, descr=valuedescr) + i2 = int_gt(i1, i22) + guard_false(i2) [] + i3 = int_add(i1, 1) + setfield_gc(p0, i3, descr=valuedescr) + i4 = int_sub(i3, 1) + setfield_gc(p0, i4, descr=valuedescr) + i5 = int_gt(i4, i22) + guard_false(i5) [] + i6 = int_add(i4, 1) + i331 = force_token() + i7 = int_sub(i6, 1) + setfield_gc(p0, i7, descr=valuedescr) + jump(p0, i22) + """ + expected = """ + [p0, i22] + i1 = getfield_gc(p0, descr=valuedescr) + i2 = int_gt(i1, i22) + guard_false(i2) [] + i3 = int_add(i1, 1) + i331 = force_token() + setfield_gc(p0, i1, descr=valuedescr) + jump(p0, i22) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_addsub_ovf(self): + ops = """ + [i0] + i1 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_sub_ovf(i1, 5) + guard_no_overflow() [] + jump(i2) + """ + expected = """ + [i0] + i1 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_sub(i1, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_subadd_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_add_ovf(i1, 5) + guard_no_overflow() [] + jump(i2) + """ + expected = """ + [i0] + i1 = int_sub_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_add(i1, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_and(self): + ops = """ + [i0] + i1 = int_and(i0, 255) + i2 = int_lt(i1, 500) + guard_true(i2) [] + i3 = int_le(i1, 255) + guard_true(i3) [] + i4 = int_gt(i1, -1) + guard_true(i4) [] + i5 = int_ge(i1, 0) + guard_true(i5) [] + i6 = int_lt(i1, 0) + guard_false(i6) [] + i7 = int_le(i1, -1) + guard_false(i7) [] + i8 = int_gt(i1, 255) + guard_false(i8) [] + i9 = int_ge(i1, 500) + guard_false(i9) [] + i12 = int_lt(i1, 100) + guard_true(i12) [] + i13 = int_le(i1, 90) + guard_true(i13) [] + i14 = int_gt(i1, 10) + guard_true(i14) [] + i15 = int_ge(i1, 20) + guard_true(i15) [] + jump(i1) + """ + expected = """ + [i0] + i1 = int_and(i0, 255) + i12 = int_lt(i1, 100) + guard_true(i12) [] + i13 = int_le(i1, 90) + guard_true(i13) [] + i14 = int_gt(i1, 10) + guard_true(i14) [] + i15 = int_ge(i1, 20) + guard_true(i15) [] + jump(i1) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_subsub_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i2 = int_gt(i1, 1) + guard_true(i2) [] + i3 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i4 = int_gt(i3, 1) + guard_true(i4) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i2 = int_gt(i1, 1) + guard_true(i2) [] + i3 = int_sub(1, i0) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_eq(self): + ops = """ + [i0, i1] + i2 = int_le(i0, 4) + guard_true(i2) [] + i3 = int_eq(i0, i1) + guard_true(i3) [] + i4 = int_lt(i1, 5) + guard_true(i4) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_le(i0, 4) + guard_true(i2) [] + i3 = int_eq(i0, i1) + guard_true(i3) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_eq_const(self): + ops = """ + [i0] + i1 = int_eq(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_eq(i0, 7) + guard_true(i1) [] + 
jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_eq_const_not(self): + ops = """ + [i0] + i1 = int_eq(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_eq(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ne_const(self): + ops = """ + [i0] + i1 = int_ne(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_ne(i0, 7) + guard_false(i1) [] + jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ne_const_not(self): + ops = """ + [i0] + i1 = int_ne(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_ne(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ltne(self): + ops = """ + [i0, i1] + i2 = int_lt(i0, 7) + guard_true(i2) [] + i3 = int_ne(i0, 10) + guard_true(i2) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_lt(i0, 7) + guard_true(i2) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_lege_const(self): + ops = """ + [i0] + i1 = int_ge(i0, 7) + guard_true(i1) [] + i2 = int_le(i0, 7) + guard_true(i2) [] + i3 = int_add(i0, 3) + jump(i3) + """ + expected = """ + [i0] + i1 = int_ge(i0, 7) + guard_true(i1) [] + i2 = int_le(i0, 7) + guard_true(i2) [] + jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_mul_ovf(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_lt(i1, 5) + guard_true(i3) [] + i4 = int_gt(i1, -10) + guard_true(i4) [] + i5 = int_mul_ovf(i2, i1) + guard_no_overflow() [] + i6 = int_lt(i5, -2550) + guard_false(i6) [] + i7 = int_ge(i5, 1276) + guard_false(i7) [] + i8 = int_gt(i5, 126) + guard_true(i8) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_lt(i1, 5) + guard_true(i3) [] + i4 = int_gt(i1, -10) + guard_true(i4) [] + i5 = int_mul(i2, i1) + i8 = int_gt(i5, 126) + guard_true(i8) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_mul_ovf_before(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i22 = int_add(i2, 1) + i3 = int_mul_ovf(i22, i1) + guard_no_overflow() [] + i4 = int_lt(i3, 10) + guard_true(i4) [] + i5 = int_gt(i3, 2) + guard_true(i5) [] + i6 = int_lt(i1, 0) + guard_false(i6) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i22 = int_add(i2, 1) + i3 = int_mul_ovf(i22, i1) + guard_no_overflow() [] + i4 = int_lt(i3, 10) + guard_true(i4) [] + i5 = int_gt(i3, 2) + guard_true(i5) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_sub_ovf_before(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_sub_ovf(i2, i1) + guard_no_overflow() [] + i4 = int_le(i3, 10) + guard_true(i4) [] + i5 = int_ge(i3, 2) + guard_true(i5) [] + i6 = int_lt(i1, -10) + guard_false(i6) [] + i7 = int_gt(i1, 253) + guard_false(i7) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_sub_ovf(i2, i1) + guard_no_overflow() [] + i4 = int_le(i3, 10) + guard_true(i4) [] + i5 = int_ge(i3, 2) + guard_true(i5) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + + + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): ## def test_instanceof(self): Modified: pypy/trunk/pypy/jit/metainterp/test/test_resume.py 
============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_resume.py Thu Sep 9 18:23:42 2010 @@ -1,7 +1,7 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi -from pypy.jit.metainterp.optimizeopt import VirtualValue, OptValue, VArrayValue -from pypy.jit.metainterp.optimizeopt import VStructValue +from pypy.jit.metainterp.optimizeopt.virtualize import VirtualValue, OptValue, VArrayValue +from pypy.jit.metainterp.optimizeopt.virtualize import VStructValue from pypy.jit.metainterp.resume import * from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt from pypy.jit.metainterp.history import ConstPtr, ConstFloat Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 9 18:23:42 2010 @@ -110,6 +110,7 @@ if sys.platform.startswith('win'): py.test.skip("XXX this is not Windows-friendly") + print logfilepath child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( logfilepath, self.pypy_c, filepath), 'r') result = child_stdout.read() @@ -118,6 +119,7 @@ assert result.splitlines()[-1].strip() == 'OK :-)' self.parse_loops(logfilepath) self.print_loops() + print logfilepath if self.total_ops > expected_max_ops: assert 0, "too many operations: got %d, expected maximum %d" % ( self.total_ops, expected_max_ops) @@ -846,6 +848,221 @@ return intimg[i - 1] ''', maxops, ([tc], res)) + def test_intbound_simple(self): + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 1500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + + exec(str(py.code.Source(src))) + res = [0] * 4 + for i in range(15): + res[f(i)] += 1500 + self.run_source(src, 268, ([], res)) + + def test_intbound_addsub_mix(self): + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + print t1, t2 + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 1500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + + exec(str(py.code.Source(src))) + res = [0] * 4 + for i in range(15): + res[f(i)] += 1500 + self.run_source(src, 280, ([], res)) + + def test_intbound_gt(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + ''', 48, ([], (2000, 2000))) + + def test_intbound_sub_lt(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i - 10 < 1995: + a += 1 + i += 1 + return (a, b) + ''', 38, ([], (2000, 0))) + + def test_intbound_addsub_ge(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + ''', 56, ([], (2000, 2000))) + + def test_intbound_addmul_ge(self): + 
self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + ''', 53, ([], (2000, 2000))) + + def test_intbound_eq(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) + + def test_intbound_mul(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + ''', 43, ([7], 1500)) + + def test_assert(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + assert a == 7 + s += a + 1 + i += 1 + return s + ''', 38, ([7], 8*1500)) + + def test_zeropadded(self): + self.run_source(''' + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= self.__len__(): + return 0 + return array.__getitem__(self, i) + + + def main(): + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + ''', 232, ([], 9895050.0)) + + def test_circular(self): + self.run_source(''' + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + # assert self.__len__() == 256 (FIXME: does not improve) + return array.__getitem__(self, i & 255) + + def main(): + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + ''', 170, ([], 1239690.0)) + + + + # test_circular + class AppTestJIT(PyPyCJITTests): def setup_class(cls): if not option.runappdirect: From arigo at codespeak.net Thu Sep 9 19:36:29 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 19:36:29 +0200 (CEST) Subject: [pypy-svn] r76983 - pypy/branch/jit-generator/pypy/jit/metainterp Message-ID: <20100909173629.751ED282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 19:36:27 2010 New Revision: 76983 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Log: I *think* that there is no point in calling _maybe_enter_from_start() at any point after the initial entry. 
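The intended control flow, as a simplified Python sketch: the hook that may trigger compilation from the function start only needs to fire on the very first entry, because every later iteration of the retry loop re-enters the portal at a jit_merge_point rather than at the function start. Only ll_portal_runner, _maybe_enter_from_start_fn and ContinueRunningNormally are real names from the patch below; portal() and rebuild_args_from() are illustrative stand-ins.

    def ll_portal_runner(*args):
        start = True
        while 1:
            try:
                if start:
                    jd._maybe_enter_from_start_fn(*args)  # only on the initial entry
                return portal(*args)
            except ContinueRunningNormally, e:
                args = rebuild_args_from(e)  # illustrative helper; the real code
                                             # unpacks the exception field by field
                start = False                # later iterations skip the hook
                continue
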
Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Thu Sep 9 19:36:27 2010 @@ -584,9 +584,11 @@ ts = self.cpu.ts def ll_portal_runner(*args): + start = True while 1: try: - jd._maybe_enter_from_start_fn(*args) + if start: + jd._maybe_enter_from_start_fn(*args) return support.maybe_on_top_of_llinterp(rtyper, portal_ptr)(*args) except self.ContinueRunningNormally, e: @@ -595,6 +597,8 @@ x = getattr(e, attrname)[count] x = specialize_value(ARGTYPE, x) args = args + (x,) + start = False + continue except self.DoneWithThisFrameVoid: assert result_kind == 'void' return From arigo at codespeak.net Thu Sep 9 19:38:25 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 19:38:25 +0200 (CEST) Subject: [pypy-svn] r76984 - in pypy/branch/jit-generator/pypy/jit: backend/llgraph metainterp metainterp/test Message-ID: <20100909173825.51BA9282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 19:38:23 2010 New Revision: 76984 Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py pypy/branch/jit-generator/pypy/jit/metainterp/compile.py pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Log: In-progress. Based on compile_tmp_callback() which makes machine code which actually just calls the portal (and thus the interpreter). With this, we can at least always use CALL_ASSEMBLER, even if no real machine code was produced for the target so far. The next step is to patch the machine code's original CALL_ASSEMBLER to jump to the real place. 
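Concretely, compile_tmp_callback() (added in the compile.py diff below) boils down to compiling a three-operation trace that calls the portal function and finishes with its result. Roughly, using the names from that diff (greenboxes, inputargs, funcbox, result, jd and loop_token are set up around it; this is a sketch, not the verbatim implementation):

    operations = [
        ResOperation(rop.CALL, [funcbox] + greenboxes + inputargs, result,
                     descr=jd.portal_calldescr),       # call the portal, i.e. the interpreter
        ResOperation(rop.GUARD_NO_EXCEPTION, [], None,
                     descr=PropagateExceptionDescr()), # propagate any exception to the caller
        ResOperation(rop.FINISH, [result], None,
                     descr=jd.portal_finishtoken),     # report "done with this frame"
    ]
    cpu.compile_loop(inputargs, operations, loop_token)

This way a CALL_ASSEMBLER always has something to target, even before any real machine code exists for the callee; once it does, the caller can be patched to jump there instead, as described above.
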
Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py Thu Sep 9 19:38:23 2010 @@ -128,7 +128,7 @@ 'getarrayitem_raw_pure' : (('ref', 'int'), 'intorptr'), 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), - 'call_assembler' : (('ref', 'varargs'), 'intorptr'), + 'call_assembler' : (('varargs',), 'intorptr'), 'cond_call_gc_wb' : (('ptr', 'ptr'), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/compile.py Thu Sep 9 19:38:23 2010 @@ -14,6 +14,7 @@ from pypy.jit.metainterp.specnode import NotSpecNode, more_general_specnodes from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimizeutil import InvalidLoop +from pypy.jit.codewriter import heaptracker def giveup(): from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -547,3 +548,51 @@ descr = target_loop_token.finishdescr new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) new_loop.operations[-1] = new_op + +# ____________________________________________________________ + +class PropagateExceptionDescr(AbstractFailDescr): + def handle_fail(self, metainterp_sd, jitdriver_sd): + cpu = metainterp_sd.cpu + exception = self.cpu.grab_exc_value() + raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) + +def compile_tmp_callback(cpu, jitdriver_sd, greenkey, redboxes): + """Make a LoopToken that corresponds to assembler code that just + calls back the interpreter. Used temporarily: a fully compiled + version of the code may end up replacing it. + """ + # 'redboxes' is only used to know the types of red arguments + inputargs = [box.clonebox() for box in redboxes] + loop_token = make_loop_token(len(inputargs), jitdriver_sd) + # + k = jitdriver_sd.portal_runner_adr + funcbox = history.ConstInt(heaptracker.adr2int(k)) + args = [funcbox] + greenkey + inputargs + # + result_type = jitdriver_sd.result_type + if result_type == history.INT: + result = BoxInt() + elif result_type == history.REF: + result = BoxPtr() + elif result_type == history.FLOAT: + result = BoxFloat() + elif result_type == history.VOID: + result = None + else: + assert 0, "bad result_type" + if result is not None: + finishargs = [] + else: + finishargs = [result] + # + jd = jitdriver_sd + faildescr = PropagateExceptionDescr() + operations = [ + ResOperation(rop.CALL, args, result, descr=jd.portal_calldescr), + ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), + ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) + ] + operations[1].fail_args = [] + cpu.compile_loop(inputargs, operations, loop_token) + return loop_token Modified: pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py Thu Sep 9 19:38:23 2010 @@ -14,6 +14,7 @@ # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.warmstate ... 
pypy.jit.metainterp.warmspot # self.handle_jitexc_from_bh pypy.jit.metainterp.warmspot + # self.portal_finishtoken... pypy.jit.metainterp.pyjitpl # self.index ... pypy.jit.codewriter.call # self.mainjitcode ... pypy.jit.codewriter.call @@ -21,6 +22,7 @@ # self.assembler_helper_adr # self.index_of_virtualizable # self.vable_token_descr + # self.portal_calldescr # warmspot sets extra attributes starting with '_' for its own use. Modified: pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py Thu Sep 9 19:38:23 2010 @@ -696,7 +696,7 @@ portal_code = targetjitdriver_sd.mainjitcode return self.metainterp.perform_call(portal_code, allboxes, greenkey=greenboxes) - token = warmrunnerstate.get_assembler_token(greenboxes) + token = warmrunnerstate.get_assembler_token(greenboxes, redboxes) # verify that we have all green args, needed to make sure # that assembler that we call is still correct self.verify_green_args(targetjitdriver_sd, greenboxes) @@ -829,7 +829,7 @@ self.pc = saved_pc else: warmrunnerstate = jitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenboxes) + token = warmrunnerstate.get_assembler_token(greenboxes, redboxes) # warning! careful here. We have to return from the current # frame containing the jit_merge_point, and then use # do_recursive_call() to follow the recursive call. This is @@ -1217,6 +1217,7 @@ history.FLOAT: 'float', history.VOID: 'void'}[jd.result_type] tokens = getattr(self, 'loop_tokens_done_with_this_frame_%s' % name) + jd.portal_finishtoken = tokens[0].finishdescr num = self.cpu.get_fail_descr_number(tokens[0].finishdescr) setattr(self.cpu, 'done_with_this_frame_%s_v' % name, num) # Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py Thu Sep 9 19:38:23 2010 @@ -1,10 +1,11 @@ from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats -from pypy.jit.metainterp.history import BoxInt +from pypy.jit.metainterp.history import BoxInt, INT from pypy.jit.metainterp.specnode import NotSpecNode, ConstantSpecNode from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.compile import ResumeGuardCountersInt -from pypy.jit.metainterp import optimize, jitprof, typesystem +from pypy.jit.metainterp.compile import compile_tmp_callback +from pypy.jit.metainterp import optimize, jitprof, typesystem, compile from pypy.jit.metainterp.test.oparser import parse from pypy.jit.metainterp.test.test_optimizefindnode import LLtypeMixin @@ -154,3 +155,47 @@ count = rgc.see_int(192) assert count == 1 assert rgc.counters == [1, 1, 7, 6, 1] + + +def test_compile_tmp_callback(): + from pypy.jit.codewriter import heaptracker + from pypy.jit.backend.llgraph import runner + from pypy.rpython.lltypesystem import lltype, llmemory + from pypy.rpython.annlowlevel import llhelper + from pypy.rpython.llinterp import LLException + # + cpu = runner.LLtypeCPU(None) + FUNC = lltype.FuncType([lltype.Signed]*4, lltype.Signed) + def ll_portal_runner(g1, g2, r3, r4): + assert (g1, g2, r3, r4) == (12, 34, -156, 
-178) + if raiseme: + raise raiseme + else: + return 54321 + # + class FakeJitDriverSD: + portal_runner_ptr = llhelper(lltype.Ptr(FUNC), ll_portal_runner) + portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr) + portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + portal_finishtoken = compile.DoneWithThisFrameDescrInt() + result_type = INT + # + loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), + [ConstInt(12), ConstInt(34)], + [BoxInt(56), ConstInt(78)]) + # + raiseme = None + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + assert fail_descr is FakeJitDriverSD().portal_finishtoken + # + EXC = lltype.GcStruct('EXC') + llexc = lltype.malloc(EXC) + raiseme = LLException("exception class", llexc) + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + assert isinstance(fail_descr, compile.PropagateExceptionDescr) + got = cpu.grab_exc_value() + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Thu Sep 9 19:38:23 2010 @@ -491,6 +491,8 @@ # unwrap_greenkey = self.make_unwrap_greenkey() jit_getter = self.make_jitcell_getter() + jd = self.jitdriver_sd + cpu = warmrunnerdesc.cpu def can_inline_greenargs(*greenargs): if can_never_inline(*greenargs): @@ -505,11 +507,14 @@ self.can_inline_greenargs = can_inline_greenargs self.can_inline_callable = can_inline_callable - def get_assembler_token(greenkey): + def get_assembler_token(greenkey, redboxes): + # 'redboxes' is only used to know the types of red arguments greenargs = unwrap_greenkey(greenkey) - cell = jit_getter(False, *greenargs) - if cell is None or cell.counter >= 0: - return None + cell = jit_getter(True, *greenargs) + if cell.entry_loop_token is None: + from pypy.jit.metainterp.compile import compile_tmp_callback + cell.entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, + redboxes) return cell.entry_loop_token self.get_assembler_token = get_assembler_token From arigo at codespeak.net Thu Sep 9 19:43:48 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 9 Sep 2010 19:43:48 +0200 (CEST) Subject: [pypy-svn] r76986 - in pypy/branch/jit-generator/pypy/jit/metainterp: . test Message-ID: <20100909174348.53F3D282B9C@codespeak.net> Author: arigo Date: Thu Sep 9 19:43:46 2010 New Revision: 76986 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py Log: Test and fix. 
Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/compile.py Thu Sep 9 19:43:46 2010 @@ -554,9 +554,11 @@ class PropagateExceptionDescr(AbstractFailDescr): def handle_fail(self, metainterp_sd, jitdriver_sd): cpu = metainterp_sd.cpu - exception = self.cpu.grab_exc_value() + exception = cpu.grab_exc_value() raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) +propagate_exception_descr = PropagateExceptionDescr() + def compile_tmp_callback(cpu, jitdriver_sd, greenkey, redboxes): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled @@ -568,7 +570,7 @@ # k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - args = [funcbox] + greenkey + inputargs + callargs = [funcbox] + greenkey + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -587,9 +589,9 @@ finishargs = [result] # jd = jitdriver_sd - faildescr = PropagateExceptionDescr() + faildescr = propagate_exception_descr operations = [ - ResOperation(rop.CALL, args, result, descr=jd.portal_calldescr), + ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr), ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) ] Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_compile.py Thu Sep 9 19:43:46 2010 @@ -199,3 +199,19 @@ assert isinstance(fail_descr, compile.PropagateExceptionDescr) got = cpu.grab_exc_value() assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc + # + class FakeMetaInterpSD: + class ExitFrameWithExceptionRef(Exception): + pass + FakeMetaInterpSD.cpu = cpu + class FakeJitDriverSD: + pass + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + try: + fail_descr.handle_fail(FakeMetaInterpSD(), FakeJitDriverSD()) + except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), e.args[1]) == llexc + else: + assert 0, "should have raised" From hakanardo at codespeak.net Fri Sep 10 07:56:06 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Fri, 10 Sep 2010 07:56:06 +0200 (CEST) Subject: [pypy-svn] r76993 - in pypy/trunk/pypy/module/array: . 
test Message-ID: <20100910055606.72FD4282B9C@codespeak.net> Author: hakanardo Date: Fri Sep 10 07:56:03 2010 New Revision: 76993 Modified: pypy/trunk/pypy/module/array/interp_array.py pypy/trunk/pypy/module/array/test/test_array.py Log: cpython compability for array subclasses overriding __new__ but not __init__ Modified: pypy/trunk/pypy/module/array/interp_array.py ============================================================================== --- pypy/trunk/pypy/module/array/interp_array.py (original) +++ pypy/trunk/pypy/module/array/interp_array.py Fri Sep 10 07:56:03 2010 @@ -26,6 +26,11 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) typecode = typecode[0] + if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)): + if len(w_args.keywords_w) > 0: + msg = 'array.array() does not take keyword arguments' + raise OperationError(space.w_TypeError, space.wrap(msg)) + for tc in unroll_typecodes: if typecode == tc: a = space.allocate_instance(types[tc].w_class, w_cls) @@ -99,7 +104,6 @@ def register(typeorder): typeorder[W_ArrayBase] = [] - class TypeCode(object): def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): self.itemtype = itemtype @@ -646,12 +650,6 @@ s = "array('%s', %s)" % (self.typecode, space.str_w(r)) return space.wrap(s) - init_signature = Signature(['typecode', 'initializer']) - init_defaults = [None, None] - - def init__Array(space, self, args): - args.parse_obj(None, 'array', init_signature, init_defaults) - mytype.w_class = W_Array # Annotator seems to mess up if the names are not unique Modified: pypy/trunk/pypy/module/array/test/test_array.py ============================================================================== --- pypy/trunk/pypy/module/array/test/test_array.py (original) +++ pypy/trunk/pypy/module/array/test/test_array.py Fri Sep 10 07:56:03 2010 @@ -483,7 +483,6 @@ b = self.array(t, v1) c = self.array(t, v2) - print (a==7) assert (a == 7) is False assert (comparable() == a) is True assert (a == comparable()) is True @@ -697,7 +696,6 @@ assert isinstance(self.array(t), self.array) def test_subclass(self): - print type(self.array('b')) assert len(self.array('b')) == 0 a = self.array('i') @@ -708,14 +706,43 @@ class adder(array): def __getitem__(self, i): - print 25 return array.__getitem__(self, i) + 1 a = adder('i', (1, 2, 3)) - print type(a) assert len(a) == 3 assert a[0] == 2 + def test_subclass_new(self): + array = self.array + class Image(array): + def __new__(cls, width, height, typecode='d'): + self = array.__new__(cls, typecode, [0] * (width * height)) + self.width = width + self.height = height + return self + + def _index(self, (x,y)): + x = min(max(x, 0), self.width-1) + y = min(max(y, 0), self.height-1) + return y * self.width + x + + def __getitem__(self, i): + return array.__getitem__(self, self._index(i)) + + def __setitem__(self, i, val): + return array.__setitem__(self, self._index(i), val) + + img = Image(5, 10, 'B') + for y in range(10): + for x in range(5): + img[x, y] = x * y + for y in range(10): + for x in range(5): + assert img[x, y] == x * y + + assert img[3, 25] == 3 * 9 + + def test_override_from(self): class mya(self.array): def fromlist(self, lst): From arigo at codespeak.net Fri Sep 10 11:51:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 11:51:09 +0200 (CEST) Subject: [pypy-svn] r76994 - in pypy/branch/jit-generator/pypy/jit: backend backend/llgraph backend/test backend/x86 metainterp metainterp/test Message-ID: <20100910095109.99B8E282B9C@codespeak.net> 
Author: arigo Date: Fri Sep 10 11:51:07 2010 New Revision: 76994 Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py pypy/branch/jit-generator/pypy/jit/backend/llgraph/runner.py pypy/branch/jit-generator/pypy/jit/backend/model.py pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py pypy/branch/jit-generator/pypy/jit/backend/x86/runner.py pypy/branch/jit-generator/pypy/jit/metainterp/compile.py pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Log: Finish implementing and using redirect_call_assembler. Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py Fri Sep 10 11:51:07 2010 @@ -165,6 +165,9 @@ self.inputargs = [] self.operations = [] + def getargtypes(self): + return [v.concretetype for v in self.inputargs] + def __repr__(self): lines = [] self.as_text(lines, 1) @@ -839,6 +842,8 @@ def op_call_assembler(self, loop_token, *args): global _last_exception assert not self._forced + loop_token = self.cpu._redirected_call_assembler.get(loop_token, + loop_token) self._may_force = self.opindex try: inpargs = _from_opaque(loop_token._llgraph_compiled_version).inputargs @@ -861,6 +866,21 @@ vable = args[jd.index_of_virtualizable] else: vable = lltype.nullptr(llmemory.GCREF.TO) + # + # Emulate the fast path + if failindex == self.cpu.done_with_this_frame_int_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_int(0) + if failindex == self.cpu.done_with_this_frame_ref_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_ref(0) + if failindex == self.cpu.done_with_this_frame_float_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_float(0) + if failindex == self.cpu.done_with_this_frame_void_v: + reset_vable(jd, vable) + return None + # assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: return assembler_helper_ptr(failindex, vable) @@ -1480,6 +1500,17 @@ else: return 0 +def reset_vable(jd, vable): + if jd.index_of_virtualizable != -1: + fielddescr = jd.vable_token_descr + do_setfield_gc_int(vable, fielddescr.ofs, 0) + +def redirect_call_assembler(cpu, oldlooptoken, newlooptoken): + OLD = _from_opaque(oldlooptoken._llgraph_compiled_version).getargtypes() + NEW = _from_opaque(newlooptoken._llgraph_compiled_version).getargtypes() + assert OLD == NEW + cpu._redirected_call_assembler[oldlooptoken] = newlooptoken + # ____________________________________________________________ Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/llgraph/runner.py Fri Sep 10 11:51:07 2010 @@ -102,6 +102,7 @@ llimpl._llinterp = LLInterpreter(self.rtyper) self._future_values = [] self._descrs = {} + self._redirected_call_assembler = {} def _freeze_(self): assert self.translate_support_code @@ -169,8 +170,8 @@ elif isinstance(x, 
history.ConstFloat): llimpl.compile_add_float_const(c, x.value) else: - raise Exception("%s args contain: %r" % (op.getopname(), - x)) + raise Exception("'%s' args contain: %r" % (op.getopname(), + x)) if op.is_guard(): faildescr = op.descr assert isinstance(faildescr, history.AbstractFailDescr) @@ -260,6 +261,11 @@ def clear_latest_values(self, count): llimpl.frame_clear_latest_values(self.latest_frame, count) + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + if we_are_translated(): + raise ValueError("CALL_ASSEMBLER not supported") + llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + # ---------- def sizeof(self, S): Modified: pypy/branch/jit-generator/pypy/jit/backend/model.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/model.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/model.py Fri Sep 10 11:51:07 2010 @@ -107,6 +107,12 @@ GUARD_NO_EXCEPTION. (Returns a GCREF)""" # XXX remove me raise NotImplementedError + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + """Redirect oldlooptoken to newlooptoken. More precisely, it is + enough to redirect all CALL_ASSEMBLERs already compiled that call + oldlooptoken so that from now own they will call newlooptoken.""" + raise NotImplementedError + @staticmethod def sizeof(S): raise NotImplementedError Modified: pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/test/runner_test.py Fri Sep 10 11:51:07 2010 @@ -1824,6 +1824,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -1846,6 +1847,20 @@ assert self.cpu.get_latest_value_float(0) == 13.5 assert called + # test the fast path, which should not call assembler_helper() + del called[:] + self.cpu.done_with_this_frame_float_v = done_number + try: + othertoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.set_future_value_float(0, 1.2) + self.cpu.set_future_value_float(1, 3.2) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 1.2 + 3.2 + assert not called + finally: + del self.cpu.done_with_this_frame_float_v + def test_raw_malloced_getarrayitem(self): ARRAY = rffi.CArray(lltype.Signed) descr = self.cpu.arraydescrof(ARRAY) @@ -1870,6 +1885,78 @@ assert a[5] == 12345 lltype.free(a, flavor='raw') + def test_redirect_call_assembler(self): + called = [] + def assembler_helper(failindex, virtualizable): + assert self.cpu.get_latest_value_float(0) == 1.25 + 3.25 + called.append(failindex) + return 13.5 + + FUNCPTR = lltype.Ptr(lltype.FuncType([lltype.Signed, llmemory.GCREF], + lltype.Float)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + ARGS = [lltype.Float, lltype.Float] + RES = lltype.Float + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + + ops = ''' + [f0, f1] + f2 = float_add(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken 
= LoopToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 2.35) + res = self.cpu.execute_token(looptoken) + assert self.cpu.get_latest_value_float(0) == 1.25 + 2.35 + assert not called + + ops = ''' + [f4, f5] + f3 = call_assembler(f4, f5, descr=looptoken) + guard_not_forced()[] + finish(f3) + ''' + loop = parse(ops, namespace=locals()) + othertoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + + # normal call_assembler: goes to looptoken + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 3.25) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + del called[:] + + # compile a replacement + ops = ''' + [f0, f1] + f2 = float_sub(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken2 = LoopToken() + looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) + + # install it + self.cpu.redirect_call_assembler(looptoken, looptoken2) + + # now, our call_assembler should go to looptoken2 + self.cpu.set_future_value_float(0, 6.0) + self.cpu.set_future_value_float(1, 1.5) # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + class OOtypeBackendTest(BaseBackendTest): Modified: pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py Fri Sep 10 11:51:07 2010 @@ -326,8 +326,10 @@ looptoken._x86_param_depth = param_depth looptoken._x86_direct_bootstrap_code = self.mc.tell() - self._assemble_bootstrap_direct_call(arglocs, curadr, - frame_depth+param_depth) + finaljmp = self._assemble_bootstrap_direct_call(arglocs, curadr, + frame_depth+param_depth) + looptoken._x86_redirect_call_assembler = finaljmp + # debug_print("Loop #", looptoken.number, "has address", looptoken._x86_loop_code, "to", self.mc.tell()) self.mc.end_function() @@ -527,7 +529,24 @@ assert isinstance(loc, StackLoc) self.mc.MOVSD_bx(loc.value, xmmtmp.value) self.mc.JMP_l(jmpadr) - return adr_stackadjust + return self.mc.tell() + + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + # some minimal sanity checking + oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs + newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs + assert len(oldnonfloatlocs) == len(newnonfloatlocs) + assert len(oldfloatlocs) == len(newfloatlocs) + # must patch the JMP at the end of the oldlooptoken's bootstrap- + # -direct-call code to go to the new loop's body + adr = oldlooptoken._x86_redirect_call_assembler + target = newlooptoken._x86_loop_code + if IS_X86_64: + self.redirect_call_assembler_64(oldlooptoken, newlooptoken) + else: + mc = codebuf.InMemoryCodeBuilder(adr - 4, adr) + mc.writeimm32(target - adr) + mc.done() def _assemble_bootstrap_direct_call_64(self, arglocs, jmpadr, stackdepth): # XXX: Very similar to _emit_call_64 @@ -580,9 +599,19 @@ # clobber the scratch register self.mc.MOV(loc, X86_64_SCRATCH_REG) + finaljmp = self.mc.tell() self.mc.JMP(imm(jmpadr)) - - return adr_stackadjust + # leave a total of 16 bytes, enough for all encodings of JMP + for i in range(self.mc.tell() - 
finaljmp, 16): + self.mc.NOP() + return finaljmp + + def redirect_call_assembler_64(self, adr, target): + # we have a total of 16 bytes free to overwrite the JMP, + # reserved by _assemble_bootstrap_direct_call_64() + mc = codebuf.InMemoryCodeBuilder(adr, adr + 16) + mc.JMP(imm(target)) + mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): nonfloatlocs, floatlocs = arglocs Modified: pypy/branch/jit-generator/pypy/jit/backend/x86/runner.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/x86/runner.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/x86/runner.py Fri Sep 10 11:51:07 2010 @@ -134,6 +134,9 @@ assert fail_index == fail_index_2 return faildescr + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/compile.py Fri Sep 10 11:51:07 2010 @@ -559,18 +559,20 @@ propagate_exception_descr = PropagateExceptionDescr() -def compile_tmp_callback(cpu, jitdriver_sd, greenkey, redboxes): +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments + # 'redboxes' is only used to know the types of red arguments. inputargs = [box.clonebox() for box in redboxes] loop_token = make_loop_token(len(inputargs), jitdriver_sd) - # + # 'nb_red_args' might be smaller than len(redboxes), + # because it doesn't include the virtualizable boxes. + nb_red_args = jitdriver_sd.num_red_args k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenkey + inputargs + callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -584,9 +586,9 @@ else: assert 0, "bad result_type" if result is not None: - finishargs = [] - else: finishargs = [result] + else: + finishargs = [] # jd = jitdriver_sd faildescr = propagate_exception_descr Modified: pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/jitdriver.py Fri Sep 10 11:51:07 2010 @@ -10,6 +10,7 @@ # self.portal_runner_adr ... pypy.jit.metainterp.warmspot # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot + # self.num_red_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.warmstate ... 
pypy.jit.metainterp.warmspot Modified: pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/pyjitpl.py Fri Sep 10 11:51:07 2010 @@ -690,25 +690,27 @@ targetjitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] allboxes = greenboxes + redboxes warmrunnerstate = targetjitdriver_sd.warmstate - token = None + assembler_call = False if warmrunnerstate.inlining: if warmrunnerstate.can_inline_callable(greenboxes): portal_code = targetjitdriver_sd.mainjitcode return self.metainterp.perform_call(portal_code, allboxes, greenkey=greenboxes) - token = warmrunnerstate.get_assembler_token(greenboxes, redboxes) + assembler_call = True # verify that we have all green args, needed to make sure # that assembler that we call is still correct self.verify_green_args(targetjitdriver_sd, greenboxes) # - return self.do_recursive_call(targetjitdriver_sd, allboxes, token) + return self.do_recursive_call(targetjitdriver_sd, allboxes, + assembler_call) - def do_recursive_call(self, targetjitdriver_sd, allboxes, token=None): + def do_recursive_call(self, targetjitdriver_sd, allboxes, + assembler_call=False): portal_code = targetjitdriver_sd.mainjitcode k = targetjitdriver_sd.portal_runner_adr funcbox = ConstInt(heaptracker.adr2int(k)) - return self.do_residual_call(funcbox, portal_code.calldescr, - allboxes, assembler_call_token=token, + return self.do_residual_call(funcbox, portal_code.calldescr, allboxes, + assembler_call=assembler_call, assembler_call_jd=targetjitdriver_sd) opimpl_recursive_call_i = _opimpl_recursive_call @@ -828,8 +830,6 @@ self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc else: - warmrunnerstate = jitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenboxes, redboxes) # warning! careful here. We have to return from the current # frame containing the jit_merge_point, and then use # do_recursive_call() to follow the recursive call. 
This is @@ -843,7 +843,8 @@ except ChangeFrame: pass frame = self.metainterp.framestack[-1] - frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, token) + frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, + assembler_call=True) raise ChangeFrame def debug_merge_point(self, jitdriver_sd, greenkey): @@ -1058,7 +1059,7 @@ return resbox def do_residual_call(self, funcbox, descr, argboxes, - assembler_call_token=None, + assembler_call=False, assembler_call_jd=None): # First build allboxes: it may need some reordering from the # list provided in argboxes, depending on the order in which @@ -1096,16 +1097,15 @@ if (effectinfo is None or effectinfo.extraeffect == effectinfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE or - assembler_call_token is not None): + assembler_call): # residual calls require attention to keep virtualizables in-sync self.metainterp.clear_exception() self.metainterp.vable_and_vrefs_before_residual_call() resbox = self.metainterp.execute_and_record_varargs( rop.CALL_MAY_FORCE, allboxes, descr=descr) self.metainterp.vrefs_after_residual_call() - if assembler_call_token is not None: - self.metainterp.direct_assembler_call(assembler_call_token, - assembler_call_jd) + if assembler_call: + self.metainterp.direct_assembler_call(assembler_call_jd) if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() @@ -2104,20 +2104,24 @@ op.args = [resbox_as_const] + op.args return resbox - def direct_assembler_call(self, token, targetjitdriver_sd): + def direct_assembler_call(self, targetjitdriver_sd): """ Generate a direct call to assembler for portal entry point, patching the CALL_MAY_FORCE that occurred just now. """ op = self.history.operations.pop() assert op.opnum == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - args = op.args[num_green_args + 1:] + greenargs = op.args[1:num_green_args+1] + args = op.args[num_green_args+1:] + assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: index = targetjitdriver_sd.index_of_virtualizable vbox = args[index] args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) # ^^^ and not "+=", which makes 'args' a resizable list + warmrunnerstate = targetjitdriver_sd.warmstate + token = warmrunnerstate.get_assembler_token(greenargs, args) op.opnum = rop.CALL_ASSEMBLER op.args = args op.descr = token Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py Fri Sep 10 11:51:07 2010 @@ -612,9 +612,32 @@ driver.can_enter_jit(codeno=codeno, i=i, j=j) portal(2, 50) - self.meta_interp(portal, [2, 20], inline=True) - self.check_loops(call_assembler=0, call_may_force=1, - everywhere=True) + + from pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(portal, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to 
something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace def test_directly_call_assembler_return(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_warmstate.py Fri Sep 10 11:51:07 2010 @@ -162,6 +162,8 @@ assert cell1.entry_loop_token == "entry loop token" def test_make_jitdriver_callbacks_1(): + class FakeWarmRunnerDesc: + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None @@ -169,7 +171,7 @@ _can_never_inline_ptr = None class FakeCell: dont_trace_here = False - state = WarmEnterState(None, FakeJitDriverSD()) + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) def jit_getter(build, *args): return FakeCell() state.jit_getter = jit_getter @@ -186,6 +188,7 @@ lltype.Ptr(rstr.STR))) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) @@ -207,6 +210,7 @@ lltype.Signed], lltype.Bool)) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None @@ -228,6 +232,7 @@ [lltype.Signed, lltype.Float], lltype.Bool)) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmspot.py Fri Sep 10 11:51:07 2010 @@ -457,6 +457,7 @@ jd._green_args_spec = [v.concretetype for v in greens_v] jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) + jd.num_red_args = len(jd._red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Fri Sep 10 11:51:07 2010 @@ -211,7 +211,11 @@ entry_loop_token): cell = self.jit_cell_at_key(greenkey) cell.counter = -1 + old_token = cell.entry_loop_token cell.entry_loop_token = entry_loop_token + if old_token is not None: + cpu = self.warmrunnerdesc.cpu + cpu.redirect_call_assembler(old_token, entry_loop_token) # ---------- @@ -492,7 +496,7 @@ unwrap_greenkey = self.make_unwrap_greenkey() jit_getter = self.make_jitcell_getter() jd = self.jitdriver_sd - cpu = warmrunnerdesc.cpu + cpu = self.warmrunnerdesc.cpu def can_inline_greenargs(*greenargs): if can_never_inline(*greenargs): From fijal at codespeak.net Fri Sep 10 12:08:31 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Fri, 
10 Sep 2010 12:08:31 +0200 (CEST) Subject: [pypy-svn] r76995 - in pypy/trunk: . pypy/jit/tool pypy/module/_socket pypy/module/_socket/test pypy/module/array/benchmark pypy/module/array/test pypy/rlib pypy/tool/release Message-ID: <20100910100831.D4129282BEF@codespeak.net> Author: fijal Date: Fri Sep 10 12:08:30 2010 New Revision: 76995 Modified: pypy/trunk/ (props changed) pypy/trunk/pypy/jit/tool/traceviewer.py pypy/trunk/pypy/module/_socket/interp_func.py pypy/trunk/pypy/module/_socket/interp_socket.py pypy/trunk/pypy/module/_socket/test/test_sock_app.py pypy/trunk/pypy/module/array/benchmark/Makefile (props changed) pypy/trunk/pypy/module/array/benchmark/intimg.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/trunk/pypy/module/array/benchmark/loop.c (props changed) pypy/trunk/pypy/module/array/benchmark/sum.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.py (props changed) pypy/trunk/pypy/module/array/test/test_array_old.py (props changed) pypy/trunk/pypy/rlib/_rsocket_rffi.py pypy/trunk/pypy/rlib/rsocket.py pypy/trunk/pypy/tool/release/force-builds.py Log: Merge rsocket-improvements branch. This branch intends to provide support for AF_PACKET type addresses (wherever supported) Modified: pypy/trunk/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/traceviewer.py (original) +++ pypy/trunk/pypy/jit/tool/traceviewer.py Fri Sep 10 12:08:30 2010 @@ -250,13 +250,14 @@ class Counts(dict): pass -def main(loopfile, options, view=True): +def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [line.rsplit(':', 1) for line in countname.readlines()] - counts = Counts([(k, int(v.strip('\n'))) for k, v in counts]) + counts = [re.split(r' +', line, 1) for line in countname.readlines()] + counts = Counts([(k.strip("\n"), int(v.strip('\n'))) + for v, k in counts]) l = list(sorted(counts.values())) - if len(l) > 20 and options.use_threshold: + if len(l) > 20 and use_threshold: counts.threshold = l[-20] else: counts.threshold = 0 @@ -274,7 +275,7 @@ if __name__ == '__main__': parser = optparse.OptionParser(usage=__doc__) parser.add_option('--use-threshold', dest='use_threshold', - action="store_true") + action="store_true", default=False) options, args = parser.parse_args(sys.argv) if len(args) != 2: print __doc__ Modified: pypy/trunk/pypy/module/_socket/interp_func.py ============================================================================== --- pypy/trunk/pypy/module/_socket/interp_func.py (original) +++ pypy/trunk/pypy/module/_socket/interp_func.py Fri Sep 10 12:08:30 2010 @@ -280,7 +280,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(space)]) + addr.as_object(-1, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) getaddrinfo.unwrap_spec = [ObjSpace, W_Root, W_Root, int, int, int, int] Modified: pypy/trunk/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/trunk/pypy/module/_socket/interp_socket.py (original) +++ pypy/trunk/pypy/module/_socket/interp_socket.py Fri Sep 10 12:08:30 2010 @@ -24,7 +24,7 @@ try: sock, addr = self.accept(W_RSocket) return space.newtuple([space.wrap(sock), 
- addr.as_object(space)]) + addr.as_object(sock.fd, space)]) except SocketError, e: raise converted_error(space, e) accept_w.unwrap_spec = ['self', ObjSpace] @@ -109,7 +109,7 @@ """ try: addr = self.getpeername() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getpeername_w.unwrap_spec = ['self', ObjSpace] @@ -122,7 +122,7 @@ """ try: addr = self.getsockname() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getsockname_w.unwrap_spec = ['self', ObjSpace] @@ -202,7 +202,7 @@ try: data, addr = self.recvfrom(buffersize, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(data), w_addr]) @@ -330,7 +330,7 @@ try: readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(readlgt), w_addr]) Modified: pypy/trunk/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- pypy/trunk/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/trunk/pypy/module/_socket/test/test_sock_app.py Fri Sep 10 12:08:30 2010 @@ -2,6 +2,8 @@ import sys import py from pypy.tool.udir import udir +from pypy.rlib import rsocket +from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): mod.space = gettestobjspace(usemodules=['_socket', 'array']) @@ -221,21 +223,45 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info -def test_unknown_addr_as_object(): - from pypy.rlib import rsocket - from pypy.rpython.lltypesystem import lltype, rffi - +def test_unknown_addr_as_object(): c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') c_addr.c_sa_data[0] = 'c' rffi.setintfield(c_addr, 'c_sa_family', 15) # XXX what size to pass here? 
for the purpose of this test it has # to be short enough so we have some data, 1 sounds good enough # + sizeof USHORT - w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(-1, space) assert space.is_true(space.isinstance(w_obj, space.w_tuple)) assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' +def test_addr_raw_packet(): + if not hasattr(rsocket._c, 'sockaddr_ll'): + py.test.skip("posix specific test") + c_addr_ll = lltype.malloc(rsocket._c.sockaddr_ll, flavor='raw') + addrlen = rffi.sizeof(rsocket._c.sockaddr_ll) + c_addr = rffi.cast(lltype.Ptr(rsocket._c.sockaddr), c_addr_ll) + rffi.setintfield(c_addr_ll, 'c_sll_ifindex', 1) + rffi.setintfield(c_addr_ll, 'c_sll_protocol', 8) + rffi.setintfield(c_addr_ll, 'c_sll_pkttype', 13) + rffi.setintfield(c_addr_ll, 'c_sll_hatype', 0) + rffi.setintfield(c_addr_ll, 'c_sll_halen', 3) + c_addr_ll.c_sll_addr[0] = 'a' + c_addr_ll.c_sll_addr[1] = 'b' + c_addr_ll.c_sll_addr[2] = 'c' + rffi.setintfield(c_addr, 'c_sa_family', socket.AF_PACKET) + # fd needs to be somehow valid + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + fd = s.fileno() + w_obj = rsocket.make_address(c_addr, addrlen).as_object(fd, space) + assert space.is_true(space.eq(w_obj, space.newtuple([ + space.wrap('lo'), + space.wrap(socket.ntohs(8)), + space.wrap(13), + space.wrap(False), + space.wrap("abc"), + ]))) + def test_getnameinfo(): host = "127.0.0.1" port = 25 Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/trunk/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/trunk/pypy/rlib/_rsocket_rffi.py Fri Sep 10 12:08:30 2010 @@ -32,6 +32,9 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', + 'netpacket/packet.h', + 'sys/ioctl.h', + 'net/if.h', ) cond_includes = [('AF_NETLINK', 'linux/netlink.h')] libraries = () @@ -190,6 +193,8 @@ FD_CONNECT_BIT FD_CLOSE_BIT WSA_IO_PENDING WSA_IO_INCOMPLETE WSA_INVALID_HANDLE WSA_INVALID_PARAMETER WSA_NOT_ENOUGH_MEMORY WSA_OPERATION_ABORTED + +SIOCGIFNAME '''.split() for name in constant_names: @@ -309,6 +314,19 @@ [('fd', socketfd_type), ('events', rffi.SHORT), ('revents', rffi.SHORT)]) + + CConfig.sockaddr_ll = platform.Struct('struct sockaddr_ll', + [('sll_ifindex', rffi.INT), + ('sll_protocol', rffi.INT), + ('sll_pkttype', rffi.INT), + ('sll_hatype', rffi.INT), + ('sll_addr', rffi.CFixedArray(rffi.CHAR, 8)), + ('sll_halen', rffi.INT)], + ) + + CConfig.ifreq = platform.Struct('struct ifreq', [('ifr_ifindex', rffi.INT), + ('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))]) + if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( @@ -408,6 +426,8 @@ if _POSIX: nfds_t = cConfig.nfds_t pollfd = cConfig.pollfd + sockaddr_ll = cConfig.sockaddr_ll + ifreq = cConfig.ifreq if WIN32: WSAEVENT = cConfig.WSAEVENT WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS @@ -510,6 +530,8 @@ socketpair_t = rffi.CArray(socketfd_type) socketpair = external('socketpair', [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(socketpair_t)], rffi.INT) + ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], + rffi.INT) if _WIN32: ioctlsocket = external('ioctlsocket', Modified: pypy/trunk/pypy/rlib/rsocket.py ============================================================================== --- pypy/trunk/pypy/rlib/rsocket.py (original) +++ pypy/trunk/pypy/rlib/rsocket.py Fri Sep 10 12:08:30 2010 @@ -6,8 +6,7 @@ # Known 
missing features: # -# - support for non-Linux platforms -# - address families other than AF_INET, AF_INET6, AF_UNIX +# - address families other than AF_INET, AF_INET6, AF_UNIX, AF_PACKET # - methods makefile(), # - SSL # @@ -109,7 +108,7 @@ """ keepalive_until_here(self) - def as_object(self, space): + def as_object(self, fd, space): """Convert the address to an app-level object.""" # If we don't know the address family, don't raise an # exception -- return it as a tuple. @@ -200,6 +199,66 @@ # ____________________________________________________________ +if 'AF_PACKET' in constants: + class PacketAddress(Address): + family = AF_PACKET + struct = _c.sockaddr_ll + maxlen = minlen = sizeof(struct) + + def get_ifname(self, fd): + a = self.lock(_c.sockaddr_ll) + p = lltype.malloc(_c.ifreq, flavor='raw') + rffi.setintfield(p, 'c_ifr_ifindex', + rffi.getintfield(a, 'c_sll_ifindex')) + if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): + # eh, the iface name is a constant length array + i = 0 + d = [] + while p.c_ifr_name[i] != '\x00' and i < len(p.c_ifr_name): + d.append(p.c_ifr_name[i]) + i += 1 + ifname = ''.join(d) + else: + ifname = "" + lltype.free(p, flavor='raw') + self.unlock() + return ifname + + def get_protocol(self): + a = self.lock(_c.sockaddr_ll) + res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) + self.unlock() + return res + + def get_pkttype(self): + a = self.lock(_c.sockaddr_ll) + res = rffi.getintfield(a, 'c_sll_pkttype') + self.unlock() + return res + + def get_hatype(self): + a = self.lock(_c.sockaddr_ll) + res = bool(rffi.getintfield(a, 'c_sll_hatype')) + self.unlock() + return res + + def get_addr(self): + a = self.lock(_c.sockaddr_ll) + lgt = rffi.getintfield(a, 'c_sll_halen') + d = [] + for i in range(lgt): + d.append(a.c_sll_addr[i]) + res = "".join(d) + self.unlock() + return res + + def as_object(self, fd, space): + return space.newtuple([space.wrap(self.get_ifname(fd)), + space.wrap(self.get_protocol()), + space.wrap(self.get_pkttype()), + space.wrap(self.get_hatype()), + space.wrap(self.get_addr())]) + class INETAddress(IPAddress): family = AF_INET struct = _c.sockaddr_in @@ -228,7 +287,7 @@ self.get_host() == other.get_host() and self.get_port() == other.get_port()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port())]) @@ -317,7 +376,7 @@ self.get_flowinfo() == other.get_flowinfo() and self.get_scope_id() == other.get_scope_id()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port()), space.wrap(self.get_flowinfo()), @@ -421,7 +480,7 @@ return (isinstance(other, UNIXAddress) and self.get_path() == other.get_path()) - def as_object(self, space): + def as_object(self, fd, space): return space.wrap(self.get_path()) def from_object(space, w_address): @@ -456,7 +515,7 @@ def __repr__(self): return '' % (self.get_pid(), self.get_groups()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_pid()), space.wrap(self.get_groups())]) @@ -613,7 +672,7 @@ # convert an Address into an app-level object def addr_as_object(self, space, address): - return address.as_object(space) + return address.as_object(self.fd, space) # convert an app-level object into an Address # based on the current socket's family Modified: pypy/trunk/pypy/tool/release/force-builds.py ============================================================================== --- 
pypy/trunk/pypy/tool/release/force-builds.py (original) +++ pypy/trunk/pypy/tool/release/force-builds.py Fri Sep 10 12:08:30 2010 @@ -21,9 +21,9 @@ 'own-linux-x86-64', # 'own-macosx-x86-32', 'pypy-c-app-level-linux-x86-32', - 'pypy-c-app-level-linux-64', + 'pypy-c-app-level-linux-x86-64', 'pypy-c-stackless-app-level-linux-x86-32', - 'pypy-c-app-level-win-32', + 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', # 'pypy-c-jit-macosx-x86-32', 'pypy-c-jit-win-x86-32', From fijal at codespeak.net Fri Sep 10 13:26:34 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Fri, 10 Sep 2010 13:26:34 +0200 (CEST) Subject: [pypy-svn] r76996 - pypy/build/bot2/pypybuildbot Message-ID: <20100910112634.77597282B9C@codespeak.net> Author: fijal Date: Fri Sep 10 13:26:30 2010 New Revision: 76996 Modified: pypy/build/bot2/pypybuildbot/master.py Log: enable 64bit jit nightly Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Fri Sep 10 13:26:30 2010 @@ -189,6 +189,7 @@ # and be hopefully finished after 2 hours LINUX32, # on tannit32, uses 4 cores JITLINUX32, # on tannit32, uses 1 core + JITLINUX64, # on tannit64, uses 1 core OJITLINUX32, # on tannit32, uses 1 core MACOSX32, # on minime APPLVLWIN32, # on bigboard From arigo at codespeak.net Fri Sep 10 14:11:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 14:11:09 +0200 (CEST) Subject: [pypy-svn] r76997 - pypy/branch/saner-guard-exc Message-ID: <20100910121109.EE54A282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 14:11:08 2010 New Revision: 76997 Removed: pypy/branch/saner-guard-exc/ Log: Failed, remove. It's also not necessary, as I managed to work around the issue in branch/jit-generator. From arigo at codespeak.net Fri Sep 10 14:57:21 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 14:57:21 +0200 (CEST) Subject: [pypy-svn] r76998 - pypy/branch/jit-generator/pypy/jit/metainterp/test Message-ID: <20100910125721.B2A28282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 14:57:19 2010 New Revision: 76998 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py Log: A test with virtualizables along compile_tmp_callback(). Passes. 
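(For context -- a simplified sketch pieced together from the warmstate.py and compile.py changes on this branch, not literal code from any single file -- the machinery this test exercises works roughly like this:)

    # the call_assembler target is not compiled yet: install a temporary
    # loop token whose assembler just calls back into the interpreter
    if cell.entry_loop_token is None:
        cell.entry_loop_token = compile_tmp_callback(cpu, jd, greenkey,
                                                     redboxes)
    # ... later, when a real loop is attached for the same greenkey, the
    # temporary token is redirected, so CALL_ASSEMBLERs that were already
    # compiled against it now reach the new code
    old_token = cell.entry_loop_token
    cell.entry_loop_token = entry_loop_token
    if old_token is not None:
        cpu.redirect_call_assembler(old_token, entry_loop_token)

The test below checks exactly that, with the red variable living in a virtualizable frame: one temporary token gets created, and the backend later records a redirection away from it.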
Modified: pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/test/test_recursive.py Fri Sep 10 14:57:19 2010 @@ -639,6 +639,62 @@ print redirected assert redirected.keys() == trace + def test_recursion_cant_call_assembler_directly_with_virtualizable(self): + # exactly the same logic as the previous test, but with 'frame.j' + # instead of just 'j' + class Frame(object): + _virtualizable2_ = ['j'] + def __init__(self, j): + self.j = j + + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + virtualizables = ['frame'], + get_printable_location = lambda codeno : str(codeno)) + + def portal(codeno, frame): + i = 0 + while 1: + driver.jit_merge_point(codeno=codeno, i=i, frame=frame) + if i == 1: + if frame.j == 0: + return + portal(2, Frame(frame.j - 1)) + elif i == 3: + return + i += 1 + driver.can_enter_jit(codeno=codeno, i=i, frame=frame) + + def main(codeno, j): + portal(codeno, Frame(j)) + + main(2, 50) + + from pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(main, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace + def test_directly_call_assembler_return(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], get_printable_location = lambda codeno : str(codeno)) From arigo at codespeak.net Fri Sep 10 15:10:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 15:10:09 +0200 (CEST) Subject: [pypy-svn] r76999 - pypy/branch/jit-generator/pypy/jit/metainterp Message-ID: <20100910131009.26EB1282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 15:10:06 2010 New Revision: 76999 Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Log: No-op simplification. 
Modified: pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-generator/pypy/jit/metainterp/warmstate.py Fri Sep 10 15:10:06 2010 @@ -513,8 +513,7 @@ def get_assembler_token(greenkey, redboxes): # 'redboxes' is only used to know the types of red arguments - greenargs = unwrap_greenkey(greenkey) - cell = jit_getter(True, *greenargs) + cell = self.jit_cell_at_key(greenkey) if cell.entry_loop_token is None: from pypy.jit.metainterp.compile import compile_tmp_callback cell.entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, From arigo at codespeak.net Fri Sep 10 15:10:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 15:10:33 +0200 (CEST) Subject: [pypy-svn] r77000 - pypy/branch/jit-generator/pypy/jit/backend/llgraph Message-ID: <20100910131033.2A34F282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 15:10:31 2010 New Revision: 77000 Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py Log: Print the inputargs too. Modified: pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/llgraph/llimpl.py Fri Sep 10 15:10:31 2010 @@ -171,7 +171,7 @@ def __repr__(self): lines = [] self.as_text(lines, 1) - return 'CompiledLoop:\n%s' % '\n'.join(lines) + return 'CompiledLoop %s:\n%s' % (self.inputargs, '\n'.join(lines)) def as_text(self, lines, indent): for op in self.operations: From arigo at codespeak.net Fri Sep 10 15:11:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 15:11:06 +0200 (CEST) Subject: [pypy-svn] r77001 - pypy/branch/jit-generator/pypy/jit/backend/x86 Message-ID: <20100910131106.A73BC282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 15:11:04 2010 New Revision: 77001 Modified: pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py Log: Fix: don't rely on the exact stack layout to be the same -- it is not in large examples. 
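(Background for the change below: redirecting a loop token means patching a JMP in already-emitted machine code. As a minimal plain-Python sketch -- not code from the backend -- a 32-bit near jump is the byte 0xE9 followed by a displacement counted from the end of the 5-byte instruction:)

    import struct

    def write_jmp_rel32(code, pos, target):
        # 'code' is a bytearray of machine code; 'pos' and 'target' are
        # byte offsets into it.  Overwrite 5 bytes at 'pos' with a JMP to
        # 'target'; the displacement is relative to the next instruction.
        rel32 = target - (pos + 5)
        code[pos:pos + 5] = b'\xE9' + struct.pack('<i', rel32)

The previous version patched such a displacement at the final JMP of the old loop's bootstrap-direct-call code, pointing it straight at the new loop's body; that only works if both loops place their arguments at exactly the same stack locations. The fix below instead overwrites the start of the old _x86_direct_bootstrap_code with a JMP to the new one's, so the new loop's own bootstrap code re-loads the arguments wherever it expects them.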
Modified: pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jit-generator/pypy/jit/backend/x86/assembler.py Fri Sep 10 15:11:04 2010 @@ -326,9 +326,8 @@ looptoken._x86_param_depth = param_depth looptoken._x86_direct_bootstrap_code = self.mc.tell() - finaljmp = self._assemble_bootstrap_direct_call(arglocs, curadr, - frame_depth+param_depth) - looptoken._x86_redirect_call_assembler = finaljmp + self._assemble_bootstrap_direct_call(arglocs, curadr, + frame_depth+param_depth) # debug_print("Loop #", looptoken.number, "has address", looptoken._x86_loop_code, "to", self.mc.tell()) @@ -529,24 +528,6 @@ assert isinstance(loc, StackLoc) self.mc.MOVSD_bx(loc.value, xmmtmp.value) self.mc.JMP_l(jmpadr) - return self.mc.tell() - - def redirect_call_assembler(self, oldlooptoken, newlooptoken): - # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) - # must patch the JMP at the end of the oldlooptoken's bootstrap- - # -direct-call code to go to the new loop's body - adr = oldlooptoken._x86_redirect_call_assembler - target = newlooptoken._x86_loop_code - if IS_X86_64: - self.redirect_call_assembler_64(oldlooptoken, newlooptoken) - else: - mc = codebuf.InMemoryCodeBuilder(adr - 4, adr) - mc.writeimm32(target - adr) - mc.done() def _assemble_bootstrap_direct_call_64(self, arglocs, jmpadr, stackdepth): # XXX: Very similar to _emit_call_64 @@ -601,15 +582,19 @@ finaljmp = self.mc.tell() self.mc.JMP(imm(jmpadr)) - # leave a total of 16 bytes, enough for all encodings of JMP - for i in range(self.mc.tell() - finaljmp, 16): - self.mc.NOP() - return finaljmp - - def redirect_call_assembler_64(self, adr, target): - # we have a total of 16 bytes free to overwrite the JMP, - # reserved by _assemble_bootstrap_direct_call_64() - mc = codebuf.InMemoryCodeBuilder(adr, adr + 16) + + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + # some minimal sanity checking + oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs + newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs + assert len(oldnonfloatlocs) == len(newnonfloatlocs) + assert len(oldfloatlocs) == len(newfloatlocs) + # we overwrite the instructions at the old _x86_direct_bootstrap_code + # to start with a JMP to the new _x86_direct_bootstrap_code. + # Ideally we should rather patch all existing CALLs, but well. 
+ oldadr = oldlooptoken._x86_direct_bootstrap_code + target = newlooptoken._x86_direct_bootstrap_code + mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) mc.JMP(imm(target)) mc.done() From arigo at codespeak.net Fri Sep 10 17:07:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 17:07:19 +0200 (CEST) Subject: [pypy-svn] r77002 - in pypy/branch/gc-module/pypy: module/gc rlib rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100910150719.ED66C282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 17:07:16 2010 New Revision: 77002 Added: pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py (contents, props changed) Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/rlib/rgc.py pypy/branch/gc-module/pypy/rpython/memory/gc/base.py pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Log: Move the get_rpy_*() helpers in their own file, inspect.py, instead of polluting the namespace of the GC class. Add dump_rpy_heap(), which dumps all objects in the heap to the given file descriptor, in the format recognized by svn/arigo/hack/pypy-hack/heapstats/. Modified: pypy/branch/gc-module/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/__init__.py (original) +++ pypy/branch/gc-module/pypy/module/gc/__init__.py Fri Sep 10 17:07:16 2010 @@ -25,6 +25,7 @@ 'get_objects': 'referents.get_objects', 'get_referents': 'referents.get_referents', 'get_referrers': 'referents.get_referrers', + 'dump_rpy_heap': 'referents.dump_rpy_heap', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Fri Sep 10 17:07:16 2010 @@ -147,3 +147,7 @@ pending_w += referents_w return space.newlist(result_w.keys()) get_referrers.unwrap_spec = [ObjSpace, 'args_w'] + +def dump_rpy_heap(space, fd): + rgc.dump_rpy_heap(fd) +dump_rpy_heap.unwrap_spec = [ObjSpace, int] Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Fri Sep 10 17:07:16 2010 @@ -368,6 +368,10 @@ else: return id(gcref._x) +def dump_rpy_heap(fd): + "NOT_RPYTHON" + raise NotImplementedError + NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) class _GcRef(object): @@ -509,3 +513,12 @@ vtable = classrepr.getvtable() assert lltype.typeOf(vtable) == rclass.CLASSTYPE return Constant(vtable, concretetype=rclass.CLASSTYPE) + +class Entry(ExtRegistryEntry): + _about_ = dump_rpy_heap + def compute_result_annotation(self, s_fd): + from pypy.annotation.model import s_None + return s_None + def specialize_call(self, hop): + vlist = hop.inputargs(lltype.Signed) + return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result) Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/base.py ============================================================================== --- 
pypy/branch/gc-module/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/base.py Fri Sep 10 17:07:16 2010 @@ -103,6 +103,9 @@ def get_size(self, obj): return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def get_size_incl_hash(self, obj): + return self.get_size(obj) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. @@ -257,99 +260,6 @@ finally: self.finalizer_lock_count -= 1 - # ---------- implementation of pypy.rlib.rgc.get_rpy_roots() ---------- - - def _counting_rpy_root(self, root): - self._count_rpy += 1 - - def _do_count_rpy_roots(self): - self._count_rpy = 0 - self.root_walker.walk_roots( - GCBase._counting_rpy_root, - GCBase._counting_rpy_root, - GCBase._counting_rpy_root) - return self._count_rpy - - def _append_rpy_root(self, root): - # Can use the gc list, but should not allocate! - # It is essential that the list is not resizable! - lst = self._list_rpy - index = self._count_rpy - if index >= len(lst): - raise ValueError - self._count_rpy = index + 1 - lst[index] = llmemory.cast_adr_to_ptr(root.address[0], llmemory.GCREF) - - def _do_append_rpy_roots(self, lst): - self._count_rpy = 0 - self._list_rpy = lst - self.root_walker.walk_roots( - GCBase._append_rpy_root, - GCBase._append_rpy_root, - GCBase._append_rpy_root) - self._list_rpy = None - - def get_rpy_roots(self): - count = self._do_count_rpy_roots() - extra = 16 - while True: - result = [lltype.nullptr(llmemory.GCREF.TO)] * (count + extra) - try: - self._do_append_rpy_roots(result) - except ValueError: - extra *= 3 - else: - return result - - # ---------- implementation of pypy.rlib.rgc.get_rpy_referents() ---------- - - def _count_rpy_referent(self, pointer, _): - self._count_rpy += 1 - - def _do_count_rpy_referents(self, gcref): - self._count_rpy = 0 - self.trace(llmemory.cast_ptr_to_adr(gcref), - self._count_rpy_referent, None) - return self._count_rpy - - def _append_rpy_referent(self, pointer, _): - # Can use the gc list, but should not allocate! - # It is essential that the list is not resizable! 
- lst = self._list_rpy - index = self._count_rpy - if index >= len(lst): - raise ValueError - self._count_rpy = index + 1 - lst[index] = llmemory.cast_adr_to_ptr(pointer.address[0], - llmemory.GCREF) - - def _do_append_rpy_referents(self, gcref, lst): - self._count_rpy = 0 - self._list_rpy = lst - self.trace(llmemory.cast_ptr_to_adr(gcref), - self._append_rpy_referent, None) - - def get_rpy_referents(self, gcref): - count = self._do_count_rpy_referents(gcref) - result = [lltype.nullptr(llmemory.GCREF.TO)] * count - self._do_append_rpy_referents(gcref, result) - return result - - # ---------- - - def get_rpy_memory_usage(self, gcref): - # overridden in semispace.py and markcompact.py to also count the hash - return self.get_size(llmemory.cast_ptr_to_adr(gcref)) - - def get_rpy_type_index(self, gcref): - from pypy.rlib.rarithmetic import intmask - typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) - return self.get_member_index(typeid) - - def is_rpy_instance(self, gcref): - typeid = self.get_type_id(llmemory.cast_ptr_to_adr(gcref)) - return self.is_rpython_class(typeid) - class MovingGCBase(GCBase): moving_gc = True Added: pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py ============================================================================== --- (empty file) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Fri Sep 10 17:07:16 2010 @@ -0,0 +1,188 @@ +""" +Utility RPython functions to inspect objects in the GC. +""" +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.rlib.objectmodel import free_non_gc_object +from pypy.rpython.module.ll_os import underscore_on_windows +from pypy.rlib import rposix + +from pypy.rpython.memory.support import AddressDict, get_address_stack + + +# ---------- implementation of pypy.rlib.rgc.get_rpy_roots() ---------- + +def _counting_rpy_root(gc, root): + gc._count_rpy += 1 + +def _do_count_rpy_roots(gc): + gc._count_rpy = 0 + gc.root_walker.walk_roots( + _counting_rpy_root, + _counting_rpy_root, + _counting_rpy_root) + return gc._count_rpy + +def _append_rpy_root(gc, root): + # Can use the gc list, but should not allocate! + # It is essential that the list is not resizable! + lst = gc._list_rpy + index = gc._count_rpy + if index >= len(lst): + raise ValueError + gc._count_rpy = index + 1 + lst[index] = llmemory.cast_adr_to_ptr(root.address[0], llmemory.GCREF) + +def _do_append_rpy_roots(gc, lst): + gc._count_rpy = 0 + gc._list_rpy = lst + gc.root_walker.walk_roots( + _append_rpy_root, + _append_rpy_root, + _append_rpy_root) + gc._list_rpy = None + +def get_rpy_roots(gc): + count = _do_count_rpy_roots(gc) + extra = 16 + while True: + result = [lltype.nullptr(llmemory.GCREF.TO)] * (count + extra) + try: + _do_append_rpy_roots(gc, result) + except ValueError: + extra *= 3 + else: + return result + +# ---------- implementation of pypy.rlib.rgc.get_rpy_referents() ---------- + +def _count_rpy_referent(pointer, gc): + gc._count_rpy += 1 + +def _do_count_rpy_referents(gc, gcref): + gc._count_rpy = 0 + gc.trace(llmemory.cast_ptr_to_adr(gcref), _count_rpy_referent, gc) + return gc._count_rpy + +def _append_rpy_referent(pointer, gc): + # Can use the gc list, but should not allocate! + # It is essential that the list is not resizable! 
+ lst = gc._list_rpy + index = gc._count_rpy + if index >= len(lst): + raise ValueError + gc._count_rpy = index + 1 + lst[index] = llmemory.cast_adr_to_ptr(pointer.address[0], + llmemory.GCREF) + +def _do_append_rpy_referents(gc, gcref, lst): + gc._count_rpy = 0 + gc._list_rpy = lst + gc.trace(llmemory.cast_ptr_to_adr(gcref), _append_rpy_referent, gc) + +def get_rpy_referents(gc, gcref): + count = _do_count_rpy_referents(gc, gcref) + result = [lltype.nullptr(llmemory.GCREF.TO)] * count + _do_append_rpy_referents(gc, gcref, result) + return result + +# ---------- + +def get_rpy_memory_usage(gc, gcref): + return gc.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref)) + +def get_rpy_type_index(gc, gcref): + typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref)) + return gc.get_member_index(typeid) + +def is_rpy_instance(gc, gcref): + typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref)) + return gc.is_rpython_class(typeid) + +# ---------- + +raw_os_write = rffi.llexternal(underscore_on_windows+'write', + [rffi.INT, llmemory.Address, rffi.SIZE_T], + rffi.SIZE_T, + sandboxsafe=True, _nowrapper=True) + +AddressStack = get_address_stack() + +class HeapDumper: + _alloc_flavor_ = "raw" + BUFSIZE = 8192 # words + + def __init__(self, gc, fd): + self.gc = gc + self.fd = rffi.cast(rffi.INT, fd) + self.writebuffer = lltype.malloc(rffi.LONGP.TO, self.BUFSIZE, + flavor='raw') + self.buf_count = 0 + self.seen = AddressDict() + self.pending = AddressStack() + + def delete(self): + self.seen.delete() + self.pending.delete() + lltype.free(self.writebuffer, flavor='raw') + free_non_gc_object(self) + + def flush(self): + if self.buf_count > 0: + bytes = self.buf_count * rffi.sizeof(rffi.LONG) + count = raw_os_write(self.fd, + rffi.cast(llmemory.Address, self.writebuffer), + rffi.cast(rffi.SIZE_T, bytes)) + if rffi.cast(lltype.Signed, count) != bytes: + raise OSError(rposix.get_errno(), "raw_os_write failed") + self.buf_count = 0 + flush._dont_inline_ = True + + def write(self, value): + x = self.buf_count + self.writebuffer[x] = value + x += 1 + self.buf_count = x + if x == self.BUFSIZE: + self.flush() + write._always_inline_ = True + + def writeobj(self, obj): + gc = self.gc + typeid = gc.get_type_id(obj) + self.write(llmemory.cast_adr_to_int(obj)) + self.write(gc.get_member_index(typeid)) + self.write(gc.get_size_incl_hash(obj)) + gc.trace(obj, self._writeref, None) + self.write(-1) + + def _writeref(self, pointer, _): + obj = pointer.address[0] + self.write(llmemory.cast_adr_to_int(obj)) + self.add(obj) + + def add(self, obj): + if not self.seen.contains(obj): + self.seen.setitem(obj, obj) + self.pending.append(obj) + + def add_roots(self): + self.gc._heap_dumper = self + self.gc.root_walker.walk_roots( + _hd_add_root, + _hd_add_root, + _hd_add_root) + self.gc._heap_dumper = None + + def walk(self): + while self.pending.non_empty(): + self.writeobj(self.pending.pop()) + +def _hd_add_root(gc, root): + gc._heap_dumper.add(root.address[0]) + +def dump_rpy_heap(gc, fd): + heapdumper = HeapDumper(gc, fd) + heapdumper.add_roots() + heapdumper.walk() + heapdumper.flush() + heapdumper.delete() Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/markcompact.py Fri Sep 10 17:07:16 2010 @@ -674,8 +674,7 @@ return llmemory.cast_adr_to_int(obj) # not in an arena... 
return adr - self.space - def get_rpy_memory_usage(self, gcref): - obj = llmemory.cast_ptr_to_adr(gcref) + def get_size_incl_hash(self, obj): size = self.get_size(obj) hdr = self.header(obj) if hdr.tid & GCFLAG_HASHFIELD: Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/semispace.py Fri Sep 10 17:07:16 2010 @@ -331,9 +331,6 @@ size += llmemory.sizeof(lltype.Signed) return size - def get_rpy_memory_usage(self, gcref): - return self.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref)) - def scan_copied(self, scan): while scan < self.free: curr = scan + self.size_gc_header() Modified: pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gctransform/framework.py Fri Sep 10 17:07:16 2010 @@ -139,6 +139,8 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP + from pypy.rpython.memory.gc import inspect + super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): # for tests: the GC choice can be specified as class attributes @@ -388,27 +390,30 @@ else: self.id_ptr = None - self.get_rpy_roots_ptr = getfn(GCClass.get_rpy_roots.im_func, + self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, [s_gc], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_referents_ptr = getfn(GCClass.get_rpy_referents.im_func, + self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, [s_gc, s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_memory_usage_ptr = getfn( - GCClass.get_rpy_memory_usage.im_func, - [s_gc, s_gcref], - annmodel.SomeInteger(), - minimal_transform=False) - self.get_rpy_type_index_ptr = getfn(GCClass.get_rpy_type_index.im_func, + self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.is_rpy_instance_ptr = getfn(GCClass.is_rpy_instance.im_func, + self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, [s_gc, s_gcref], annmodel.SomeBool(), minimal_transform=False) + self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + [s_gc, annmodel.SomeInteger()], + annmodel.s_None, + minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, @@ -944,6 +949,14 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_dump_rpy_heap(self, hop): + livevars = self.push_roots(hop) + [v_fd] = hop.spaceop.args + hop.genop("direct_call", + [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): Modified: pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py (original) +++ 
pypy/branch/gc-module/pypy/translator/c/test/test_newgc.py Fri Sep 10 17:07:16 2010 @@ -1063,6 +1063,31 @@ def test_get_rpy_type_index(self): self.run("get_rpy_type_index") + filename_dump = str(udir.join('test_dump_rpy_heap')) + def define_dump_rpy_heap(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + filename = self.filename_dump + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + # + fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666) + rgc.dump_rpy_heap(fd) + os.close(fd) + return 0 + + return fn + + def test_dump_rpy_heap(self): + self.run("dump_rpy_heap") + assert os.path.exists(self.filename_dump) + assert os.path.getsize(self.filename_dump) > 0 # minimal test + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From arigo at codespeak.net Fri Sep 10 17:17:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 17:17:13 +0200 (CEST) Subject: [pypy-svn] r77003 - pypy/branch/gc-module/pypy/module/gc Message-ID: <20100910151713.4246F282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 17:17:11 2010 New Revision: 77003 Modified: pypy/branch/gc-module/pypy/module/gc/referents.py Log: Add documentation. Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Fri Sep 10 17:17:11 2010 @@ -149,5 +149,14 @@ get_referrers.unwrap_spec = [ObjSpace, 'args_w'] def dump_rpy_heap(space, fd): + """Write a full dump of the objects in the heap to the given file + descriptor. Format for each object (each item is one machine word): + + [addr] [typeindex] [size] [addr1]..[addrn] [-1] + + where [addr] is the address of the object, [typeindex] and [size] + are as get_rpy_type_index() and get_rpy_memory_usage() would return, + and [addr1]..[addrn] are addresses of other objects that this object + points to.""" rgc.dump_rpy_heap(fd) dump_rpy_heap.unwrap_spec = [ObjSpace, int] From arigo at codespeak.net Fri Sep 10 17:45:57 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 17:45:57 +0200 (CEST) Subject: [pypy-svn] r77004 - in pypy/trunk/pypy: jit/backend jit/backend/llgraph jit/backend/test jit/backend/x86 jit/metainterp jit/metainterp/test module/pypyjit rlib Message-ID: <20100910154557.5F564282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 17:45:54 2010 New Revision: 77004 Modified: pypy/trunk/pypy/jit/backend/llgraph/llimpl.py pypy/trunk/pypy/jit/backend/llgraph/runner.py pypy/trunk/pypy/jit/backend/model.py pypy/trunk/pypy/jit/backend/test/runner_test.py pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/runner.py pypy/trunk/pypy/jit/metainterp/compile.py pypy/trunk/pypy/jit/metainterp/jitdriver.py pypy/trunk/pypy/jit/metainterp/pyjitpl.py pypy/trunk/pypy/jit/metainterp/test/test_basic.py pypy/trunk/pypy/jit/metainterp/test/test_compile.py pypy/trunk/pypy/jit/metainterp/test/test_recursive.py pypy/trunk/pypy/jit/metainterp/test/test_warmstate.py pypy/trunk/pypy/jit/metainterp/warmspot.py pypy/trunk/pypy/jit/metainterp/warmstate.py pypy/trunk/pypy/module/pypyjit/interp_jit.py pypy/trunk/pypy/module/pypyjit/policy.py pypy/trunk/pypy/rlib/jit.py Log: Merge branch/jit-generator. 
It re-adds some supports for generators, and it also avoids the issue of call_assemblers whose target code is not yet compiled, by providing in this case a dummy piece of machine code that can later be patched. Modified: pypy/trunk/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/trunk/pypy/jit/backend/llgraph/llimpl.py Fri Sep 10 17:45:54 2010 @@ -128,7 +128,7 @@ 'getarrayitem_raw_pure' : (('ref', 'int'), 'intorptr'), 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), - 'call_assembler' : (('ref', 'varargs'), 'intorptr'), + 'call_assembler' : (('varargs',), 'intorptr'), 'cond_call_gc_wb' : (('ptr', 'ptr'), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), @@ -165,10 +165,13 @@ self.inputargs = [] self.operations = [] + def getargtypes(self): + return [v.concretetype for v in self.inputargs] + def __repr__(self): lines = [] self.as_text(lines, 1) - return 'CompiledLoop:\n%s' % '\n'.join(lines) + return 'CompiledLoop %s:\n%s' % (self.inputargs, '\n'.join(lines)) def as_text(self, lines, indent): for op in self.operations: @@ -839,6 +842,8 @@ def op_call_assembler(self, loop_token, *args): global _last_exception assert not self._forced + loop_token = self.cpu._redirected_call_assembler.get(loop_token, + loop_token) self._may_force = self.opindex try: inpargs = _from_opaque(loop_token._llgraph_compiled_version).inputargs @@ -861,6 +866,21 @@ vable = args[jd.index_of_virtualizable] else: vable = lltype.nullptr(llmemory.GCREF.TO) + # + # Emulate the fast path + if failindex == self.cpu.done_with_this_frame_int_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_int(0) + if failindex == self.cpu.done_with_this_frame_ref_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_ref(0) + if failindex == self.cpu.done_with_this_frame_float_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_float(0) + if failindex == self.cpu.done_with_this_frame_void_v: + reset_vable(jd, vable) + return None + # assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: return assembler_helper_ptr(failindex, vable) @@ -1480,6 +1500,17 @@ else: return 0 +def reset_vable(jd, vable): + if jd.index_of_virtualizable != -1: + fielddescr = jd.vable_token_descr + do_setfield_gc_int(vable, fielddescr.ofs, 0) + +def redirect_call_assembler(cpu, oldlooptoken, newlooptoken): + OLD = _from_opaque(oldlooptoken._llgraph_compiled_version).getargtypes() + NEW = _from_opaque(newlooptoken._llgraph_compiled_version).getargtypes() + assert OLD == NEW + cpu._redirected_call_assembler[oldlooptoken] = newlooptoken + # ____________________________________________________________ Modified: pypy/trunk/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/trunk/pypy/jit/backend/llgraph/runner.py Fri Sep 10 17:45:54 2010 @@ -102,6 +102,7 @@ llimpl._llinterp = LLInterpreter(self.rtyper) self._future_values = [] self._descrs = {} + self._redirected_call_assembler = {} def _freeze_(self): assert self.translate_support_code @@ -169,8 +170,8 @@ elif isinstance(x, history.ConstFloat): llimpl.compile_add_float_const(c, x.value) else: - raise Exception("%s args contain: %r" % (op.getopname(), - x)) + raise Exception("'%s' args contain: %r" % (op.getopname(), + x)) if op.is_guard(): faildescr = 
op.descr assert isinstance(faildescr, history.AbstractFailDescr) @@ -260,6 +261,11 @@ def clear_latest_values(self, count): llimpl.frame_clear_latest_values(self.latest_frame, count) + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + if we_are_translated(): + raise ValueError("CALL_ASSEMBLER not supported") + llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + # ---------- def sizeof(self, S): Modified: pypy/trunk/pypy/jit/backend/model.py ============================================================================== --- pypy/trunk/pypy/jit/backend/model.py (original) +++ pypy/trunk/pypy/jit/backend/model.py Fri Sep 10 17:45:54 2010 @@ -107,6 +107,12 @@ GUARD_NO_EXCEPTION. (Returns a GCREF)""" # XXX remove me raise NotImplementedError + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + """Redirect oldlooptoken to newlooptoken. More precisely, it is + enough to redirect all CALL_ASSEMBLERs already compiled that call + oldlooptoken so that from now own they will call newlooptoken.""" + raise NotImplementedError + @staticmethod def sizeof(S): raise NotImplementedError Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/runner_test.py (original) +++ pypy/trunk/pypy/jit/backend/test/runner_test.py Fri Sep 10 17:45:54 2010 @@ -1824,6 +1824,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -1846,6 +1847,20 @@ assert self.cpu.get_latest_value_float(0) == 13.5 assert called + # test the fast path, which should not call assembler_helper() + del called[:] + self.cpu.done_with_this_frame_float_v = done_number + try: + othertoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.set_future_value_float(0, 1.2) + self.cpu.set_future_value_float(1, 3.2) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 1.2 + 3.2 + assert not called + finally: + del self.cpu.done_with_this_frame_float_v + def test_raw_malloced_getarrayitem(self): ARRAY = rffi.CArray(lltype.Signed) descr = self.cpu.arraydescrof(ARRAY) @@ -1870,6 +1885,78 @@ assert a[5] == 12345 lltype.free(a, flavor='raw') + def test_redirect_call_assembler(self): + called = [] + def assembler_helper(failindex, virtualizable): + assert self.cpu.get_latest_value_float(0) == 1.25 + 3.25 + called.append(failindex) + return 13.5 + + FUNCPTR = lltype.Ptr(lltype.FuncType([lltype.Signed, llmemory.GCREF], + lltype.Float)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + ARGS = [lltype.Float, lltype.Float] + RES = lltype.Float + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + + ops = ''' + [f0, f1] + f2 = float_add(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken = LoopToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 2.35) + res = self.cpu.execute_token(looptoken) + assert self.cpu.get_latest_value_float(0) == 1.25 + 
2.35 + assert not called + + ops = ''' + [f4, f5] + f3 = call_assembler(f4, f5, descr=looptoken) + guard_not_forced()[] + finish(f3) + ''' + loop = parse(ops, namespace=locals()) + othertoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + + # normal call_assembler: goes to looptoken + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 3.25) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + del called[:] + + # compile a replacement + ops = ''' + [f0, f1] + f2 = float_sub(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken2 = LoopToken() + looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) + + # install it + self.cpu.redirect_call_assembler(looptoken, looptoken2) + + # now, our call_assembler should go to looptoken2 + self.cpu.set_future_value_float(0, 6.0) + self.cpu.set_future_value_float(1, 1.5) # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + class OOtypeBackendTest(BaseBackendTest): Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Fri Sep 10 17:45:54 2010 @@ -328,6 +328,7 @@ looptoken._x86_direct_bootstrap_code = self.mc.tell() self._assemble_bootstrap_direct_call(arglocs, curadr, frame_depth+param_depth) + # debug_print("Loop #", looptoken.number, "has address", looptoken._x86_loop_code, "to", self.mc.tell()) self.mc.end_function() @@ -527,7 +528,6 @@ assert isinstance(loc, StackLoc) self.mc.MOVSD_bx(loc.value, xmmtmp.value) self.mc.JMP_l(jmpadr) - return adr_stackadjust def _assemble_bootstrap_direct_call_64(self, arglocs, jmpadr, stackdepth): # XXX: Very similar to _emit_call_64 @@ -580,9 +580,23 @@ # clobber the scratch register self.mc.MOV(loc, X86_64_SCRATCH_REG) + finaljmp = self.mc.tell() self.mc.JMP(imm(jmpadr)) - return adr_stackadjust + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + # some minimal sanity checking + oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs + newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs + assert len(oldnonfloatlocs) == len(newnonfloatlocs) + assert len(oldfloatlocs) == len(newfloatlocs) + # we overwrite the instructions at the old _x86_direct_bootstrap_code + # to start with a JMP to the new _x86_direct_bootstrap_code. + # Ideally we should rather patch all existing CALLs, but well. 
+ oldadr = oldlooptoken._x86_direct_bootstrap_code + target = newlooptoken._x86_direct_bootstrap_code + mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) + mc.JMP(imm(target)) + mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): nonfloatlocs, floatlocs = arglocs Modified: pypy/trunk/pypy/jit/backend/x86/runner.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/runner.py (original) +++ pypy/trunk/pypy/jit/backend/x86/runner.py Fri Sep 10 17:45:54 2010 @@ -134,6 +134,9 @@ assert fail_index == fail_index_2 return faildescr + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 Modified: pypy/trunk/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/compile.py (original) +++ pypy/trunk/pypy/jit/metainterp/compile.py Fri Sep 10 17:45:54 2010 @@ -14,6 +14,7 @@ from pypy.jit.metainterp.specnode import NotSpecNode, more_general_specnodes from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimizeutil import InvalidLoop +from pypy.jit.codewriter import heaptracker def giveup(): from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -57,12 +58,9 @@ loop.inputargs = history.inputargs for box in loop.inputargs: assert isinstance(box, Box) - if start > 0: - ops = history.operations[start:] - else: - ops = history.operations # make a copy, because optimize_loop can mutate the ops and descrs - loop.operations = [op.clone() for op in ops] + h_ops = history.operations + loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) @@ -550,3 +548,55 @@ descr = target_loop_token.finishdescr new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) new_loop.operations[-1] = new_op + +# ____________________________________________________________ + +class PropagateExceptionDescr(AbstractFailDescr): + def handle_fail(self, metainterp_sd, jitdriver_sd): + cpu = metainterp_sd.cpu + exception = cpu.grab_exc_value() + raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) + +propagate_exception_descr = PropagateExceptionDescr() + +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes): + """Make a LoopToken that corresponds to assembler code that just + calls back the interpreter. Used temporarily: a fully compiled + version of the code may end up replacing it. + """ + # 'redboxes' is only used to know the types of red arguments. + inputargs = [box.clonebox() for box in redboxes] + loop_token = make_loop_token(len(inputargs), jitdriver_sd) + # 'nb_red_args' might be smaller than len(redboxes), + # because it doesn't include the virtualizable boxes. 
+ nb_red_args = jitdriver_sd.num_red_args + k = jitdriver_sd.portal_runner_adr + funcbox = history.ConstInt(heaptracker.adr2int(k)) + callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + # + result_type = jitdriver_sd.result_type + if result_type == history.INT: + result = BoxInt() + elif result_type == history.REF: + result = BoxPtr() + elif result_type == history.FLOAT: + result = BoxFloat() + elif result_type == history.VOID: + result = None + else: + assert 0, "bad result_type" + if result is not None: + finishargs = [result] + else: + finishargs = [] + # + jd = jitdriver_sd + faildescr = propagate_exception_descr + operations = [ + ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr), + ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), + ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) + ] + operations[1].fail_args = [] + cpu.compile_loop(inputargs, operations, loop_token) + return loop_token Modified: pypy/trunk/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/trunk/pypy/jit/metainterp/jitdriver.py Fri Sep 10 17:45:54 2010 @@ -10,10 +10,12 @@ # self.portal_runner_adr ... pypy.jit.metainterp.warmspot # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot + # self.num_red_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.warmstate ... pypy.jit.metainterp.warmspot # self.handle_jitexc_from_bh pypy.jit.metainterp.warmspot + # self.portal_finishtoken... pypy.jit.metainterp.pyjitpl # self.index ... pypy.jit.codewriter.call # self.mainjitcode ... pypy.jit.codewriter.call @@ -21,6 +23,7 @@ # self.assembler_helper_adr # self.index_of_virtualizable # self.vable_token_descr + # self.portal_calldescr # warmspot sets extra attributes starting with '_' for its own use. 
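For illustration (not part of the patch itself): for an int-returning portal with two green and two red arguments, the temporary loop that compile_tmp_callback() builds above corresponds roughly to the following trace, where the green arguments are baked in as constants and the portal function is called residually:

    [i0, i1]
    i2 = call(<portal_runner_adr as ConstInt>, <g0>, <g1>, i0, i1, descr=portal_calldescr)
    guard_no_exception() []          # descr=propagate_exception_descr
    finish(i2, descr=portal_finishtoken)

Any CALL_ASSEMBLER compiled against such a token therefore just calls back the interpreter until a fully compiled loop is attached and redirected to.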
Modified: pypy/trunk/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/trunk/pypy/jit/metainterp/pyjitpl.py Fri Sep 10 17:45:54 2010 @@ -690,25 +690,27 @@ targetjitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] allboxes = greenboxes + redboxes warmrunnerstate = targetjitdriver_sd.warmstate - token = None + assembler_call = False if warmrunnerstate.inlining: if warmrunnerstate.can_inline_callable(greenboxes): portal_code = targetjitdriver_sd.mainjitcode return self.metainterp.perform_call(portal_code, allboxes, greenkey=greenboxes) - token = warmrunnerstate.get_assembler_token(greenboxes) + assembler_call = True # verify that we have all green args, needed to make sure # that assembler that we call is still correct self.verify_green_args(targetjitdriver_sd, greenboxes) # - return self.do_recursive_call(targetjitdriver_sd, allboxes, token) + return self.do_recursive_call(targetjitdriver_sd, allboxes, + assembler_call) - def do_recursive_call(self, targetjitdriver_sd, allboxes, token=None): + def do_recursive_call(self, targetjitdriver_sd, allboxes, + assembler_call=False): portal_code = targetjitdriver_sd.mainjitcode k = targetjitdriver_sd.portal_runner_adr funcbox = ConstInt(heaptracker.adr2int(k)) - return self.do_residual_call(funcbox, portal_code.calldescr, - allboxes, assembler_call_token=token, + return self.do_residual_call(funcbox, portal_code.calldescr, allboxes, + assembler_call=assembler_call, assembler_call_jd=targetjitdriver_sd) opimpl_recursive_call_i = _opimpl_recursive_call @@ -828,8 +830,6 @@ self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc else: - warmrunnerstate = jitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenboxes) # warning! careful here. We have to return from the current # frame containing the jit_merge_point, and then use # do_recursive_call() to follow the recursive call. 
This is @@ -843,7 +843,8 @@ except ChangeFrame: pass frame = self.metainterp.framestack[-1] - frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, token) + frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, + assembler_call=True) raise ChangeFrame def debug_merge_point(self, jitdriver_sd, greenkey): @@ -1058,7 +1059,7 @@ return resbox def do_residual_call(self, funcbox, descr, argboxes, - assembler_call_token=None, + assembler_call=False, assembler_call_jd=None): # First build allboxes: it may need some reordering from the # list provided in argboxes, depending on the order in which @@ -1096,16 +1097,15 @@ if (effectinfo is None or effectinfo.extraeffect == effectinfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE or - assembler_call_token is not None): + assembler_call): # residual calls require attention to keep virtualizables in-sync self.metainterp.clear_exception() self.metainterp.vable_and_vrefs_before_residual_call() resbox = self.metainterp.execute_and_record_varargs( rop.CALL_MAY_FORCE, allboxes, descr=descr) self.metainterp.vrefs_after_residual_call() - if assembler_call_token is not None: - self.metainterp.direct_assembler_call(assembler_call_token, - assembler_call_jd) + if assembler_call: + self.metainterp.direct_assembler_call(assembler_call_jd) if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() @@ -1217,6 +1217,7 @@ history.FLOAT: 'float', history.VOID: 'void'}[jd.result_type] tokens = getattr(self, 'loop_tokens_done_with_this_frame_%s' % name) + jd.portal_finishtoken = tokens[0].finishdescr num = self.cpu.get_fail_descr_number(tokens[0].finishdescr) setattr(self.cpu, 'done_with_this_frame_%s_v' % name, num) # @@ -2103,20 +2104,24 @@ op.args = [resbox_as_const] + op.args return resbox - def direct_assembler_call(self, token, targetjitdriver_sd): + def direct_assembler_call(self, targetjitdriver_sd): """ Generate a direct call to assembler for portal entry point, patching the CALL_MAY_FORCE that occurred just now. 
""" op = self.history.operations.pop() assert op.opnum == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - args = op.args[num_green_args + 1:] + greenargs = op.args[1:num_green_args+1] + args = op.args[num_green_args+1:] + assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: index = targetjitdriver_sd.index_of_virtualizable vbox = args[index] args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) # ^^^ and not "+=", which makes 'args' a resizable list + warmrunnerstate = targetjitdriver_sd.warmstate + token = warmrunnerstate.get_assembler_token(greenargs, args) op.opnum = rop.CALL_ASSEMBLER op.args = args op.descr = token Modified: pypy/trunk/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_basic.py Fri Sep 10 17:45:54 2010 @@ -555,6 +555,32 @@ assert res == -2 self.check_loop_count(1) + def test_can_never_inline(self): + def can_never_inline(x): + return x > 50 + myjitdriver = JitDriver(greens = ['x'], reds = ['y'], + can_never_inline = can_never_inline) + @dont_look_inside + def marker(): + pass + def f(x, y): + while y >= 0: + myjitdriver.can_enter_jit(x=x, y=y) + myjitdriver.jit_merge_point(x=x, y=y) + x += 1 + if x == 4 or x == 61: + marker() + y -= x + return y + # + res = self.meta_interp(f, [3, 6], repeat=7) + assert res == 6 - 4 - 5 + self.check_history(call=0) # because the trace starts in the middle + # + res = self.meta_interp(f, [60, 84], repeat=7) + assert res == 84 - 61 - 62 + self.check_history(call=1) # because the trace starts immediately + def test_format(self): def f(n): return len("<%d>" % n) Modified: pypy/trunk/pypy/jit/metainterp/test/test_compile.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_compile.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_compile.py Fri Sep 10 17:45:54 2010 @@ -1,10 +1,11 @@ from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats -from pypy.jit.metainterp.history import BoxInt +from pypy.jit.metainterp.history import BoxInt, INT from pypy.jit.metainterp.specnode import NotSpecNode, ConstantSpecNode from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.compile import ResumeGuardCountersInt -from pypy.jit.metainterp import optimize, jitprof, typesystem +from pypy.jit.metainterp.compile import compile_tmp_callback +from pypy.jit.metainterp import optimize, jitprof, typesystem, compile from pypy.jit.metainterp.test.oparser import parse from pypy.jit.metainterp.test.test_optimizefindnode import LLtypeMixin @@ -154,3 +155,63 @@ count = rgc.see_int(192) assert count == 1 assert rgc.counters == [1, 1, 7, 6, 1] + + +def test_compile_tmp_callback(): + from pypy.jit.codewriter import heaptracker + from pypy.jit.backend.llgraph import runner + from pypy.rpython.lltypesystem import lltype, llmemory + from pypy.rpython.annlowlevel import llhelper + from pypy.rpython.llinterp import LLException + # + cpu = runner.LLtypeCPU(None) + FUNC = lltype.FuncType([lltype.Signed]*4, lltype.Signed) + def ll_portal_runner(g1, g2, r3, r4): + assert (g1, g2, r3, r4) == (12, 34, -156, -178) + if raiseme: + raise raiseme + else: + return 54321 + # + class FakeJitDriverSD: + 
portal_runner_ptr = llhelper(lltype.Ptr(FUNC), ll_portal_runner) + portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr) + portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + portal_finishtoken = compile.DoneWithThisFrameDescrInt() + result_type = INT + # + loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), + [ConstInt(12), ConstInt(34)], + [BoxInt(56), ConstInt(78)]) + # + raiseme = None + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + assert fail_descr is FakeJitDriverSD().portal_finishtoken + # + EXC = lltype.GcStruct('EXC') + llexc = lltype.malloc(EXC) + raiseme = LLException("exception class", llexc) + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + assert isinstance(fail_descr, compile.PropagateExceptionDescr) + got = cpu.grab_exc_value() + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc + # + class FakeMetaInterpSD: + class ExitFrameWithExceptionRef(Exception): + pass + FakeMetaInterpSD.cpu = cpu + class FakeJitDriverSD: + pass + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + fail_descr = cpu.execute_token(loop_token) + try: + fail_descr.handle_fail(FakeMetaInterpSD(), FakeJitDriverSD()) + except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), e.args[1]) == llexc + else: + assert 0, "should have raised" Modified: pypy/trunk/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_recursive.py Fri Sep 10 17:45:54 2010 @@ -612,9 +612,88 @@ driver.can_enter_jit(codeno=codeno, i=i, j=j) portal(2, 50) - self.meta_interp(portal, [2, 20], inline=True) - self.check_loops(call_assembler=0, call_may_force=1, - everywhere=True) + + from pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(portal, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace + + def test_recursion_cant_call_assembler_directly_with_virtualizable(self): + # exactly the same logic as the previous test, but with 'frame.j' + # instead of just 'j' + class Frame(object): + _virtualizable2_ = ['j'] + def __init__(self, j): + self.j = j + + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + virtualizables = ['frame'], + get_printable_location = lambda codeno : str(codeno)) + + def portal(codeno, frame): + i = 0 + while 1: + driver.jit_merge_point(codeno=codeno, i=i, frame=frame) + if i == 1: + if frame.j == 0: + return + portal(2, Frame(frame.j - 1)) + elif i == 3: + return + i += 1 + driver.can_enter_jit(codeno=codeno, i=i, frame=frame) + + def main(codeno, j): + portal(codeno, Frame(j)) + + main(2, 50) + + from 
pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(main, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace def test_directly_call_assembler_return(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], Modified: pypy/trunk/pypy/jit/metainterp/test/test_warmstate.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_warmstate.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_warmstate.py Fri Sep 10 17:45:54 2010 @@ -162,13 +162,16 @@ assert cell1.entry_loop_token == "entry loop token" def test_make_jitdriver_callbacks_1(): + class FakeWarmRunnerDesc: + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None class FakeCell: dont_trace_here = False - state = WarmEnterState(None, FakeJitDriverSD()) + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) def jit_getter(build, *args): return FakeCell() state.jit_getter = jit_getter @@ -185,10 +188,12 @@ lltype.Ptr(rstr.STR))) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -205,13 +210,37 @@ lltype.Signed], lltype.Bool)) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.confirm_enter_jit(5, 42.5, 3) assert res is True + +def test_make_jitdriver_callbacks_5(): + def can_never_inline(x, y): + assert x == 5 + assert y == 42.5 + return True + CAN_NEVER_INLINE = lltype.Ptr(lltype.FuncType( + [lltype.Signed, lltype.Float], lltype.Bool)) + class FakeWarmRunnerDesc: + rtyper = None + cpu = None + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed, lltype.Float] + _get_printable_location_ptr = None + _confirm_enter_jit_ptr = None + _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) + _get_jitcell_at_ptr = None + + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + state.make_jitdriver_callbacks() + res = state.can_never_inline(5, 42.5) + assert res is True Modified: pypy/trunk/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/warmspot.py (original) +++ 
pypy/trunk/pypy/jit/metainterp/warmspot.py Fri Sep 10 17:45:54 2010 @@ -425,6 +425,8 @@ jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.confirm_enter_jit, annmodel.s_Bool, onlygreens=False) + jd._can_never_inline_ptr = self._make_hook_graph(jd, + annhelper, jd.jitdriver.can_never_inline, annmodel.s_Bool) annhelper.finish() def _make_hook_graph(self, jitdriver_sd, annhelper, func, @@ -455,6 +457,7 @@ jd._green_args_spec = [v.concretetype for v in greens_v] jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) + jd.num_red_args = len(jd._red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) @@ -582,9 +585,11 @@ ts = self.cpu.ts def ll_portal_runner(*args): + start = True while 1: try: - jd._maybe_enter_from_start_fn(*args) + if start: + jd._maybe_enter_from_start_fn(*args) return support.maybe_on_top_of_llinterp(rtyper, portal_ptr)(*args) except self.ContinueRunningNormally, e: @@ -593,6 +598,8 @@ x = getattr(e, attrname)[count] x = specialize_value(ARGTYPE, x) args = args + (x,) + start = False + continue except self.DoneWithThisFrameVoid: assert result_kind == 'void' return Modified: pypy/trunk/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/warmstate.py (original) +++ pypy/trunk/pypy/jit/metainterp/warmstate.py Fri Sep 10 17:45:54 2010 @@ -211,7 +211,11 @@ entry_loop_token): cell = self.jit_cell_at_key(greenkey) cell.counter = -1 + old_token = cell.entry_loop_token cell.entry_loop_token = entry_loop_token + if old_token is not None: + cpu = self.warmrunnerdesc.cpu + cpu.redirect_call_assembler(old_token, entry_loop_token) # ---------- @@ -491,8 +495,12 @@ # unwrap_greenkey = self.make_unwrap_greenkey() jit_getter = self.make_jitcell_getter() + jd = self.jitdriver_sd + cpu = self.warmrunnerdesc.cpu def can_inline_greenargs(*greenargs): + if can_never_inline(*greenargs): + return False cell = jit_getter(False, *greenargs) if cell is not None and cell.dont_trace_here: return False @@ -503,11 +511,13 @@ self.can_inline_greenargs = can_inline_greenargs self.can_inline_callable = can_inline_callable - def get_assembler_token(greenkey): - greenargs = unwrap_greenkey(greenkey) - cell = jit_getter(False, *greenargs) - if cell is None or cell.counter >= 0: - return None + def get_assembler_token(greenkey, redboxes): + # 'redboxes' is only used to know the types of red arguments + cell = self.jit_cell_at_key(greenkey) + if cell.entry_loop_token is None: + from pypy.jit.metainterp.compile import compile_tmp_callback + cell.entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, + redboxes) return cell.entry_loop_token self.get_assembler_token = get_assembler_token @@ -546,3 +556,16 @@ confirm_enter_jit_ptr) return fn(*args) self.confirm_enter_jit = confirm_enter_jit + # + can_never_inline_ptr = self.jitdriver_sd._can_never_inline_ptr + if can_never_inline_ptr is None: + def can_never_inline(*greenargs): + return False + else: + rtyper = self.warmrunnerdesc.rtyper + # + def can_never_inline(*greenargs): + fn = support.maybe_on_top_of_llinterp(rtyper, + can_never_inline_ptr) + return fn(*greenargs) + self.can_never_inline = can_never_inline Modified: pypy/trunk/pypy/module/pypyjit/interp_jit.py ============================================================================== --- 
pypy/trunk/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/trunk/pypy/module/pypyjit/interp_jit.py Fri Sep 10 17:45:54 2010 @@ -36,11 +36,13 @@ bytecode.jit_cells[next_instr] = newcell def confirm_enter_jit(next_instr, bytecode, frame, ec): - return (not (bytecode.co_flags & CO_GENERATOR) and - frame.w_f_trace is None and + return (frame.w_f_trace is None and ec.profilefunc is None and ec.w_tracefunc is None) +def can_never_inline(next_instr, bytecode): + return (bytecode.co_flags & CO_GENERATOR) != 0 + class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] @@ -58,7 +60,8 @@ pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, - confirm_enter_jit = confirm_enter_jit) + confirm_enter_jit = confirm_enter_jit, + can_never_inline = can_never_inline) class __extend__(PyFrame): Modified: pypy/trunk/pypy/module/pypyjit/policy.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/policy.py (original) +++ pypy/trunk/pypy/module/pypyjit/policy.py Fri Sep 10 17:45:54 2010 @@ -32,8 +32,6 @@ return False if mod.startswith('pypy.interpreter.pyparser.'): return False - if mod == 'pypy.interpreter.generator': - return False if mod.startswith('pypy.module.'): modname = mod[len('pypy.module.'):] if not self.look_inside_pypy_module(modname): Modified: pypy/trunk/pypy/rlib/jit.py ============================================================================== --- pypy/trunk/pypy/rlib/jit.py (original) +++ pypy/trunk/pypy/rlib/jit.py Fri Sep 10 17:45:54 2010 @@ -253,7 +253,8 @@ def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, - get_printable_location=None, confirm_enter_jit=None): + get_printable_location=None, confirm_enter_jit=None, + can_never_inline=None): if greens is not None: self.greens = greens if reds is not None: @@ -270,6 +271,7 @@ self.set_jitcell_at = set_jitcell_at self.get_printable_location = get_printable_location self.confirm_enter_jit = confirm_enter_jit + self.can_never_inline = can_never_inline def _freeze_(self): return True From arigo at codespeak.net Fri Sep 10 17:46:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 10 Sep 2010 17:46:15 +0200 (CEST) Subject: [pypy-svn] r77005 - pypy/branch/jit-generator Message-ID: <20100910154615.42794282B9C@codespeak.net> Author: arigo Date: Fri Sep 10 17:46:13 2010 New Revision: 77005 Removed: pypy/branch/jit-generator/ Log: Remove merged branch. 
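For illustration (outside the commits themselves): the merge above gives every backend a redirect_call_assembler(oldlooptoken, newlooptoken) hook. The llgraph backend emulates it with a plain dictionary that op_call_assembler() consults on each call, while the x86 backend overwrites the start of the old loop's direct bootstrap code with a JMP to the new one. A minimal standalone Python sketch of the dictionary variant, using made-up class names:

    class ToyCPU(object):
        def __init__(self):
            # maps redirected (old) loop tokens to their replacements
            self._redirected_call_assembler = {}

        def redirect_call_assembler(self, oldlooptoken, newlooptoken):
            self._redirected_call_assembler[oldlooptoken] = newlooptoken

        def _resolve(self, looptoken):
            # what op_call_assembler does before executing the target loop
            return self._redirected_call_assembler.get(looptoken, looptoken)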
From afa at codespeak.net Sat Sep 11 00:38:14 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 11 Sep 2010 00:38:14 +0200 (CEST) Subject: [pypy-svn] r77006 - pypy/branch/fast-forward/lib_pypy Message-ID: <20100910223814.05DD8282B9C@codespeak.net> Author: afa Date: Sat Sep 11 00:38:12 2010 New Revision: 77006 Modified: pypy/branch/fast-forward/lib_pypy/struct.py Log: Fix a probable typo Modified: pypy/branch/fast-forward/lib_pypy/struct.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/struct.py (original) +++ pypy/branch/fast-forward/lib_pypy/struct.py Sat Sep 11 00:38:12 2010 @@ -119,7 +119,7 @@ unsigned = 0 for i in range(8): unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size) + return float_unpack(unsigned, size, le) def round_to_nearest(x): """Python 3 style round: round a float x to the nearest int, but From afa at codespeak.net Sat Sep 11 00:50:39 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 11 Sep 2010 00:50:39 +0200 (CEST) Subject: [pypy-svn] r77007 - pypy/branch/fast-forward/pypy/bin Message-ID: <20100910225039.0FF6B282B9C@codespeak.net> Author: afa Date: Sat Sep 11 00:50:37 2010 New Revision: 77007 Modified: pypy/branch/fast-forward/pypy/bin/py.py Log: I don't know how to force an option to terminates the option list, but at least this change does the right thing with: bin/py.py -m test.regrtest -v test_sys i.e. '-v' is an option of the regrtest script, not for bin/py.py. Modified: pypy/branch/fast-forward/pypy/bin/py.py ============================================================================== --- pypy/branch/fast-forward/pypy/bin/py.py (original) +++ pypy/branch/fast-forward/pypy/bin/py.py Sat Sep 11 00:50:37 2010 @@ -33,12 +33,12 @@ default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), - StrOption("runmodule", - "library module to be run as a script (terminates option list)", - default=None, cmdline="-m"), - StrOption("runcommand", - "program passed in as CMD (terminates option list)", - default=None, cmdline="-c"), + BoolOption("runmodule", + "library module to be run as a script (terminates option list)", + default=False, cmdline="-m"), + BoolOption("runcommand", + "program passed in as CMD (terminates option list)", + default=False, cmdline="-c"), StrOption("warn", "warning control (arg is action:message:category:module:lineno)", default=None, cmdline="-W"), @@ -116,19 +116,22 @@ go_interactive = interactiveconfig.interactive banner = '' exit_status = 0 - if interactiveconfig.runcommand is not None: - args = ['-c'] + args + command = None + if interactiveconfig.runcommand: + command = args[0] + args[0] = '-c' + if interactiveconfig.runmodule: + command = args.pop(0) for arg in args: space.call_method(space.sys.get('argv'), 'append', space.wrap(arg)) # load the source of the program given as command-line argument - if interactiveconfig.runcommand is not None: + if interactiveconfig.runcommand: def doit(): - main.run_string(interactiveconfig.runcommand, space=space) + main.run_string(command, space=space) elif interactiveconfig.runmodule: def doit(): - main.run_module(interactiveconfig.runmodule, - args, space=space) + main.run_module(command, args, space=space) elif args: scriptdir = os.path.dirname(os.path.abspath(args[0])) space.call_method(space.sys.get('path'), 'insert', From afa at codespeak.net Sat Sep 11 00:55:23 2010 From: afa at codespeak.net (afa at 
codespeak.net) Date: Sat, 11 Sep 2010 00:55:23 +0200 (CEST) Subject: [pypy-svn] r77008 - pypy/branch/fast-forward/pypy/config Message-ID: <20100910225523.466F5282B9C@codespeak.net> Author: afa Date: Sat Sep 11 00:55:21 2010 New Revision: 77008 Modified: pypy/branch/fast-forward/pypy/config/pypyoption.py Log: On Windows, site.py accesses the user's default codepage. the _locale module must be enabled. Modified: pypy/branch/fast-forward/pypy/config/pypyoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/pypyoption.py (original) +++ pypy/branch/fast-forward/pypy/config/pypyoption.py Sat Sep 11 00:55:21 2010 @@ -48,9 +48,8 @@ del working_modules["termios"] del working_modules["_minimal_curses"] - # The _locale module is probably incomplete, - # but enough for the tests to pass on Windows - working_modules["_locale"] = None + # The _locale module is needed by site.py on Windows + default_modules["_locale"] = None if sys.platform == "sunos5": del working_modules['mmap'] # depend on ctypes, can't get at c-level 'errono' From arigo at codespeak.net Sat Sep 11 10:38:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 11 Sep 2010 10:38:50 +0200 (CEST) Subject: [pypy-svn] r77009 - in pypy/branch/gc-module/pypy: module/gc rpython/memory/gc Message-ID: <20100911083850.73F48282BDC@codespeak.net> Author: arigo Date: Sat Sep 11 10:38:47 2010 New Revision: 77009 Modified: pypy/branch/gc-module/pypy/module/gc/referents.py pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Log: Catch the RPython-level OSError. Add a marker between the roots and the rest of the dump. Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Sat Sep 11 10:38:47 2010 @@ -2,6 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import ObjSpace +from pypy.interpreter.error import wrap_oserror from pypy.rlib.objectmodel import we_are_translated @@ -158,5 +159,8 @@ are as get_rpy_type_index() and get_rpy_memory_usage() would return, and [addr1]..[addrn] are addresses of other objects that this object points to.""" - rgc.dump_rpy_heap(fd) + try: + rgc.dump_rpy_heap(fd) + except OSError, e: + raise wrap_oserror(space, e) dump_rpy_heap.unwrap_spec = [ObjSpace, int] Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Sat Sep 11 10:38:47 2010 @@ -172,6 +172,11 @@ _hd_add_root, _hd_add_root) self.gc._heap_dumper = None + # a marker to mean "end of the roots" + self.write(0) + self.write(0) + self.write(0) + self.write(-1) def walk(self): while self.pending.non_empty(): From arigo at codespeak.net Sat Sep 11 10:41:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 11 Sep 2010 10:41:58 +0200 (CEST) Subject: [pypy-svn] r77010 - pypy/branch/gc-module/pypy/module/gc Message-ID: <20100911084158.93B4B282BDC@codespeak.net> Author: arigo Date: Sat Sep 11 10:41:57 2010 New Revision: 77010 Modified: pypy/branch/gc-module/pypy/module/gc/referents.py Log: Document it. 
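For illustration (not part of r77009/r77010): a minimal reader for the dump format documented below, written as a plain Python sketch. It assumes the file is a flat sequence of native signed words and that each record's list of addresses ends with -1, which is what the [0][0][0][-1] end-of-roots marker suggests:

    import struct

    def read_rpy_heap_dump(filename):
        with open(filename, 'rb') as f:
            data = f.read()
        wordsize = struct.calcsize('l')
        words = struct.unpack('%dl' % (len(data) // wordsize), data)
        objects = []
        i = 0
        while i < len(words):
            addr, typeindex, size = words[i], words[i + 1], words[i + 2]
            i += 3
            refs = []
            while words[i] != -1:        # addresses this object points to
                refs.append(words[i])
                i += 1
            i += 1                       # skip the -1 terminator
            objects.append((addr, typeindex, size, refs))
        return objects

    # The marker shows up once in the result as (0, 0, 0, []), separating
    # the GC roots from the rest of the heap.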
Modified: pypy/branch/gc-module/pypy/module/gc/referents.py ============================================================================== --- pypy/branch/gc-module/pypy/module/gc/referents.py (original) +++ pypy/branch/gc-module/pypy/module/gc/referents.py Sat Sep 11 10:41:57 2010 @@ -158,7 +158,9 @@ where [addr] is the address of the object, [typeindex] and [size] are as get_rpy_type_index() and get_rpy_memory_usage() would return, and [addr1]..[addrn] are addresses of other objects that this object - points to.""" + points to. The full dump is a list of such objects, with a marker + [0][0][0][-1] inserted after all GC roots, before all non-roots. + """ try: rgc.dump_rpy_heap(fd) except OSError, e: From arigo at codespeak.net Sat Sep 11 10:54:35 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 11 Sep 2010 10:54:35 +0200 (CEST) Subject: [pypy-svn] r77011 - pypy/branch/gc-module/pypy/rpython/memory/gc Message-ID: <20100911085435.76FA4282BDC@codespeak.net> Author: arigo Date: Sat Sep 11 10:54:34 2010 New Revision: 77011 Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Log: Fix. Modified: pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py ============================================================================== --- pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py (original) +++ pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Sat Sep 11 10:54:34 2010 @@ -146,6 +146,12 @@ self.flush() write._always_inline_ = True + def write_marker(self): + self.write(0) + self.write(0) + self.write(0) + self.write(-1) + def writeobj(self, obj): gc = self.gc typeid = gc.get_type_id(obj) @@ -172,15 +178,15 @@ _hd_add_root, _hd_add_root) self.gc._heap_dumper = None - # a marker to mean "end of the roots" - self.write(0) - self.write(0) - self.write(0) - self.write(-1) - - def walk(self): - while self.pending.non_empty(): - self.writeobj(self.pending.pop()) + pendingroots = self.pending + self.pending = AddressStack() + self.walk(pendingroots) + pendingroots.delete() + self.write_marker() + + def walk(self, pending): + while pending.non_empty(): + self.writeobj(pending.pop()) def _hd_add_root(gc, root): gc._heap_dumper.add(root.address[0]) @@ -188,6 +194,6 @@ def dump_rpy_heap(gc, fd): heapdumper = HeapDumper(gc, fd) heapdumper.add_roots() - heapdumper.walk() + heapdumper.walk(heapdumper.pending) heapdumper.flush() heapdumper.delete() From arigo at codespeak.net Sat Sep 11 15:14:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 11 Sep 2010 15:14:18 +0200 (CEST) Subject: [pypy-svn] r77020 - pypy/branch/gc-module/pypy/rlib Message-ID: <20100911131418.30066282BDC@codespeak.net> Author: arigo Date: Sat Sep 11 15:14:16 2010 New Revision: 77020 Modified: pypy/branch/gc-module/pypy/rlib/rgc.py Log: Forgotten. 
Modified: pypy/branch/gc-module/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/gc-module/pypy/rlib/rgc.py (original) +++ pypy/branch/gc-module/pypy/rlib/rgc.py Sat Sep 11 15:14:16 2010 @@ -521,4 +521,5 @@ return s_None def specialize_call(self, hop): vlist = hop.inputargs(lltype.Signed) + hop.exception_is_here() return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result) From arigo at codespeak.net Sat Sep 11 17:55:38 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 11 Sep 2010 17:55:38 +0200 (CEST) Subject: [pypy-svn] r77021 - pypy/trunk/pypy/module/_weakref Message-ID: <20100911155538.E4DDF282BD4@codespeak.net> Author: arigo Date: Sat Sep 11 17:55:36 2010 New Revision: 77021 Modified: pypy/trunk/pypy/module/_weakref/interp__weakref.py Log: Improve the error message. Modified: pypy/trunk/pypy/module/_weakref/interp__weakref.py ============================================================================== --- pypy/trunk/pypy/module/_weakref/interp__weakref.py (original) +++ pypy/trunk/pypy/module/_weakref/interp__weakref.py Sat Sep 11 17:55:36 2010 @@ -118,7 +118,7 @@ try: w_self.space.call_function(w_self.w_callable, w_self) except OperationError, e: - e.write_unraisable(w_self.space, 'function', w_self.w_callable) + e.write_unraisable(w_self.space, 'weakref callback ', w_self.w_callable) class W_Weakref(W_WeakrefBase): From dan at codespeak.net Sun Sep 12 10:59:30 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Sun, 12 Sep 2010 10:59:30 +0200 (CEST) Subject: [pypy-svn] r77022 - pypy/branch/micronumpy/pypy/module/micronumpy Message-ID: <20100912085930.BE620282B9E@codespeak.net> Author: dan Date: Sun Sep 12 10:59:28 2010 New Revision: 77022 Modified: pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py Log: Preparing for sync with trunk. 
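For context (illustrative only, not taken from the patch): the hunk below touches the slice-composition arithmetic in index2slices(), where taking a new slice of an already strided view advances the offset, scales the step, and carries the underlying stride over. Isolated into a tiny hypothetical helper:

    def compose_slice(offset, slice_step, stride, start, step):
        # view[start::step] taken of a view described by (offset, slice_step, stride)
        new_offset = offset + start * slice_step * stride
        new_slice_step = slice_step * step
        new_stride = stride             # unchanged; only offset and step compose
        return new_offset, new_slice_step, new_stride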
Modified: pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py ============================================================================== --- pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py (original) +++ pypy/branch/micronumpy/pypy/module/micronumpy/microarray.py Sun Sep 12 10:59:28 2010 @@ -26,8 +26,8 @@ def __init__(self, array): self.array = array self.i = 0 - self.step = array.slice_steps[0] self.shape = array.shape[0] + self.step = array.slice_steps[0] self.stride = array.strides[0] self.ndim = len(array.shape) self.offset = 0 @@ -65,7 +65,7 @@ class MicroArray(BaseNumArray): - _immutable_fields_ = ['shape', 'parent', 'strides', 'offset', 'slice_starts'] + _immutable_fields_ = ['parent', 'data', 'offset', 'shape', 'slice_steps', 'strides'] def __init__(self, shape, dtype, order='C', strides=None, parent=None, offset=0, slice_steps=None): @@ -199,6 +199,7 @@ offset += start * self.slice_steps[i] * self.strides[i] shape[resdim] = length slice_steps[resdim] = self.slice_steps[i] * step + strides[resdim] = self.strides[i] resdim += 1 elif space.is_w(w_index, space.w_Ellipsis): shape[resdim] = self.shape[i] @@ -220,7 +221,7 @@ size = size_from_shape(shape) - if size == 0: + if len(shape) == 0: return self.getitem(space, offset) else: ar = MicroArray(shape, @@ -261,11 +262,11 @@ dtype = self.dtype.dtype offset, shape, slice_steps, strides = self.index2slices(space, w_index) + #print "Shape:", shape, "Steps:", slice_steps, "Strides:", strides size = size_from_shape(shape) try: - # XXX: if size is 0 we shouldn't really infer value_shape = infer_shape(space, w_value) value_size = size_from_shape(value_shape) except OperationError, e: @@ -274,7 +275,10 @@ value_size = 0 else: raise - if size == 0: + if len(value_shape) == 0 and len(shape) > 0: + self.set_slice_single_value(space, offset, shape, slice_steps, strides, + self.dtype.dtype.coerce(space, w_value)) + elif len(shape) == 0: if len(value_shape) > 0: raise OperationError(space.w_ValueError, space.wrap("shape mismatch: objects cannot" @@ -282,7 +286,7 @@ self.setitem(space, offset, self.dtype.dtype.coerce(space, w_value)) else: - if squeeze_shape(value_shape) != squeeze_shape(shape): + if shape != value_shape: raise OperationError(space.w_ValueError, space.wrap("shape mismatch: objects cannot" " be broadcast to a single shape")) From arigo at codespeak.net Sun Sep 12 13:18:59 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 12 Sep 2010 13:18:59 +0200 (CEST) Subject: [pypy-svn] r77023 - pypy/branch/abort-no-asm Message-ID: <20100912111859.1BB0A282BEF@codespeak.net> Author: arigo Date: Sun Sep 12 13:18:57 2010 New Revision: 77023 Removed: pypy/branch/abort-no-asm/ Log: This old branch was never merged, and now it's irrelevant. From arigo at codespeak.net Sun Sep 12 17:21:28 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 12 Sep 2010 17:21:28 +0200 (CEST) Subject: [pypy-svn] r77025 - pypy/trunk/pypy/jit/metainterp/test Message-ID: <20100912152128.E250B282B9D@codespeak.net> Author: arigo Date: Sun Sep 12 17:21:27 2010 New Revision: 77025 Modified: pypy/trunk/pypy/jit/metainterp/test/test_compile.py Log: Fix the test. 
Modified: pypy/trunk/pypy/jit/metainterp/test/test_compile.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_compile.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_compile.py Sun Sep 12 17:21:27 2010 @@ -178,15 +178,17 @@ portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr) portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) portal_finishtoken = compile.DoneWithThisFrameDescrInt() + num_red_args = 2 result_type = INT # loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), [ConstInt(12), ConstInt(34)], - [BoxInt(56), ConstInt(78)]) + [BoxInt(56), ConstInt(78), BoxInt(90)]) # raiseme = None cpu.set_future_value_int(0, -156) cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) # passed in, but dropped fail_descr = cpu.execute_token(loop_token) assert fail_descr is FakeJitDriverSD().portal_finishtoken # @@ -195,6 +197,7 @@ raiseme = LLException("exception class", llexc) cpu.set_future_value_int(0, -156) cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) fail_descr = cpu.execute_token(loop_token) assert isinstance(fail_descr, compile.PropagateExceptionDescr) got = cpu.grab_exc_value() @@ -208,6 +211,7 @@ pass cpu.set_future_value_int(0, -156) cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) fail_descr = cpu.execute_token(loop_token) try: fail_descr.handle_fail(FakeMetaInterpSD(), FakeJitDriverSD()) From afa at codespeak.net Sun Sep 12 19:13:24 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 12 Sep 2010 19:13:24 +0200 (CEST) Subject: [pypy-svn] r77026 - pypy/branch/fast-forward/pypy/rlib/rstruct Message-ID: <20100912171324.5C8CB282B90@codespeak.net> Author: afa Date: Sun Sep 12 19:13:22 2010 New Revision: 77026 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/nativefmttable.py Log: Let the struct module compile with the Microsoft compiler Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/nativefmttable.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/nativefmttable.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/nativefmttable.py Sun Sep 12 19:13:22 2010 @@ -80,7 +80,10 @@ '?': '_Bool', } - pre_include_bits = [] + pre_include_bits = [""" + #ifdef _MSC_VER + #define _Bool char + #endif"""] field_names = dict.fromkeys(INSPECT) for fmtchar, ctype in INSPECT.iteritems(): field_name = ctype.replace(" ", "_").replace("*", "star") From afa at codespeak.net Sun Sep 12 22:04:34 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 12 Sep 2010 22:04:34 +0200 (CEST) Subject: [pypy-svn] r77027 - in pypy/branch/fast-forward/pypy/module/sys: . 
test Message-ID: <20100912200434.069C7282B90@codespeak.net> Author: afa Date: Sun Sep 12 22:04:32 2010 New Revision: 77027 Modified: pypy/branch/fast-forward/pypy/module/sys/__init__.py pypy/branch/fast-forward/pypy/module/sys/system.py pypy/branch/fast-forward/pypy/module/sys/test/test_sysmodule.py Log: implement sys.long_info Modified: pypy/branch/fast-forward/pypy/module/sys/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/__init__.py Sun Sep 12 22:04:32 2010 @@ -75,6 +75,7 @@ 'getfilesystemencoding' : 'interp_encoding.getfilesystemencoding', 'float_info' : 'system.get_float_info(space)', + 'long_info' : 'system.get_long_info(space)', } if sys.platform == 'win32': Modified: pypy/branch/fast-forward/pypy/module/sys/system.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/system.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/system.py Sun Sep 12 22:04:32 2010 @@ -1,6 +1,7 @@ """Information about the current system.""" from pypy.interpreter import gateway -from pypy.rlib import rfloat +from pypy.rlib import rfloat, rbigint +from pypy.rpython.lltypesystem import rffi app = gateway.applevel(""" @@ -20,6 +21,11 @@ epsilon = structseqfield(8) radix = structseqfield(9) rounds = structseqfield(10) + +class long_info: + __metaclass__ = structseqtype + bits_per_digit = structseqfield(0) + sizeof_digit = structseqfield(1) """) @@ -39,3 +45,14 @@ ] w_float_info = app.wget(space, "float_info") return space.call_function(w_float_info, space.newtuple(info_w)) + +def get_long_info(space): + assert rbigint.SHIFT == 31 + bits_per_digit = rbigint.SHIFT + sizeof_digit = rffi.sizeof(rffi.ULONG) + info_w = [ + space.wrap(bits_per_digit), + space.wrap(sizeof_digit), + ] + w_long_info = app.wget(space, "long_info") + return space.call_function(w_long_info, space.newtuple(info_w)) Modified: pypy/branch/fast-forward/pypy/module/sys/test/test_sysmodule.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/test/test_sysmodule.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/test/test_sysmodule.py Sun Sep 12 22:04:32 2010 @@ -129,6 +129,11 @@ assert isinstance(fi.radix, int) assert isinstance(fi.rounds, int) + def test_long_info(self): + import sys + li = sys.long_info + assert isinstance(li.bits_per_digit, int) + assert isinstance(li.sizeof_digit, int) class AppTestSysModulePortedFromCPython: From afa at codespeak.net Sun Sep 12 22:07:26 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 12 Sep 2010 22:07:26 +0200 (CEST) Subject: [pypy-svn] r77028 - pypy/branch/fast-forward/lib-python/modified-2.7.0/test Message-ID: <20100912200726.01086282B90@codespeak.net> Author: afa Date: Sun Sep 12 22:07:25 2010 New Revision: 77028 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/ (props changed) pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py (contents, props changed) Log: Add a customized version of test_sys.py So far, only skip 2 tests as "implementation details" (about reference counting, and sys._current_frames) Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py 
Sun Sep 12 22:07:25 2010 @@ -0,0 +1,795 @@ +# -*- coding: iso-8859-1 -*- +import unittest, test.test_support +import sys, os, cStringIO +import struct +import operator + +class SysModuleTest(unittest.TestCase): + + def tearDown(self): + test.test_support.reap_children() + + def test_original_displayhook(self): + import __builtin__ + savestdout = sys.stdout + out = cStringIO.StringIO() + sys.stdout = out + + dh = sys.__displayhook__ + + self.assertRaises(TypeError, dh) + if hasattr(__builtin__, "_"): + del __builtin__._ + + dh(None) + self.assertEqual(out.getvalue(), "") + self.assertTrue(not hasattr(__builtin__, "_")) + dh(42) + self.assertEqual(out.getvalue(), "42\n") + self.assertEqual(__builtin__._, 42) + + del sys.stdout + self.assertRaises(RuntimeError, dh, 42) + + sys.stdout = savestdout + + def test_lost_displayhook(self): + olddisplayhook = sys.displayhook + del sys.displayhook + code = compile("42", "", "single") + self.assertRaises(RuntimeError, eval, code) + sys.displayhook = olddisplayhook + + def test_custom_displayhook(self): + olddisplayhook = sys.displayhook + def baddisplayhook(obj): + raise ValueError + sys.displayhook = baddisplayhook + code = compile("42", "", "single") + self.assertRaises(ValueError, eval, code) + sys.displayhook = olddisplayhook + + def test_original_excepthook(self): + savestderr = sys.stderr + err = cStringIO.StringIO() + sys.stderr = err + + eh = sys.__excepthook__ + + self.assertRaises(TypeError, eh) + try: + raise ValueError(42) + except ValueError, exc: + eh(*sys.exc_info()) + + sys.stderr = savestderr + self.assertTrue(err.getvalue().endswith("ValueError: 42\n")) + + # FIXME: testing the code for a lost or replaced excepthook in + # Python/pythonrun.c::PyErr_PrintEx() is tricky. + + def test_exc_clear(self): + self.assertRaises(TypeError, sys.exc_clear, 42) + + # Verify that exc_info is present and matches exc, then clear it, and + # check that it worked. + def clear_check(exc): + typ, value, traceback = sys.exc_info() + self.assertTrue(typ is not None) + self.assertTrue(value is exc) + self.assertTrue(traceback is not None) + + with test.test_support.check_py3k_warnings(): + sys.exc_clear() + + typ, value, traceback = sys.exc_info() + self.assertTrue(typ is None) + self.assertTrue(value is None) + self.assertTrue(traceback is None) + + def clear(): + try: + raise ValueError, 42 + except ValueError, exc: + clear_check(exc) + + # Raise an exception and check that it can be cleared + clear() + + # Verify that a frame currently handling an exception is + # unaffected by calling exc_clear in a nested frame. 
+ try: + raise ValueError, 13 + except ValueError, exc: + typ1, value1, traceback1 = sys.exc_info() + clear() + typ2, value2, traceback2 = sys.exc_info() + + self.assertTrue(typ1 is typ2) + self.assertTrue(value1 is exc) + self.assertTrue(value1 is value2) + self.assertTrue(traceback1 is traceback2) + + # Check that an exception can be cleared outside of an except block + clear_check(exc) + + def test_exit(self): + self.assertRaises(TypeError, sys.exit, 42, 42) + + # call without argument + try: + sys.exit(0) + except SystemExit, exc: + self.assertEquals(exc.code, 0) + except: + self.fail("wrong exception") + else: + self.fail("no exception") + + # call with tuple argument with one entry + # entry will be unpacked + try: + sys.exit(42) + except SystemExit, exc: + self.assertEquals(exc.code, 42) + except: + self.fail("wrong exception") + else: + self.fail("no exception") + + # call with integer argument + try: + sys.exit((42,)) + except SystemExit, exc: + self.assertEquals(exc.code, 42) + except: + self.fail("wrong exception") + else: + self.fail("no exception") + + # call with string argument + try: + sys.exit("exit") + except SystemExit, exc: + self.assertEquals(exc.code, "exit") + except: + self.fail("wrong exception") + else: + self.fail("no exception") + + # call with tuple argument with two entries + try: + sys.exit((17, 23)) + except SystemExit, exc: + self.assertEquals(exc.code, (17, 23)) + except: + self.fail("wrong exception") + else: + self.fail("no exception") + + # test that the exit machinery handles SystemExits properly + import subprocess + # both unnormalized... + rc = subprocess.call([sys.executable, "-c", + "raise SystemExit, 46"]) + self.assertEqual(rc, 46) + # ... and normalized + rc = subprocess.call([sys.executable, "-c", + "raise SystemExit(47)"]) + self.assertEqual(rc, 47) + + def check_exit_message(code, expected, env=None): + process = subprocess.Popen([sys.executable, "-c", code], + stderr=subprocess.PIPE, env=env) + stdout, stderr = process.communicate() + self.assertEqual(process.returncode, 1) + self.assertTrue(stderr.startswith(expected), + "%s doesn't start with %s" % (repr(stderr), repr(expected))) + + # test that stderr buffer if flushed before the exit message is written + # into stderr + check_exit_message( + r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")', + b"unflushed,message") + + # test that the unicode message is encoded to the stderr encoding + env = os.environ.copy() + env['PYTHONIOENCODING'] = 'latin-1' + check_exit_message( + r'import sys; sys.exit(u"h\xe9")', + b"h\xe9", env=env) + + def test_getdefaultencoding(self): + if test.test_support.have_unicode: + self.assertRaises(TypeError, sys.getdefaultencoding, 42) + # can't check more than the type, as the user might have changed it + self.assertIsInstance(sys.getdefaultencoding(), str) + + # testing sys.settrace() is done in test_sys_settrace.py + # testing sys.setprofile() is done in test_sys_setprofile.py + + def test_setcheckinterval(self): + self.assertRaises(TypeError, sys.setcheckinterval) + orig = sys.getcheckinterval() + for n in 0, 100, 120, orig: # orig last to restore starting state + sys.setcheckinterval(n) + self.assertEquals(sys.getcheckinterval(), n) + + def test_recursionlimit(self): + self.assertRaises(TypeError, sys.getrecursionlimit, 42) + oldlimit = sys.getrecursionlimit() + self.assertRaises(TypeError, sys.setrecursionlimit) + self.assertRaises(ValueError, sys.setrecursionlimit, -42) + sys.setrecursionlimit(10000) + 
self.assertEqual(sys.getrecursionlimit(), 10000) + sys.setrecursionlimit(oldlimit) + + def test_getwindowsversion(self): + # Raise SkipTest if sys doesn't have getwindowsversion attribute + test.test_support.get_attribute(sys, "getwindowsversion") + v = sys.getwindowsversion() + self.assertEqual(len(v), 5) + self.assertIsInstance(v[0], int) + self.assertIsInstance(v[1], int) + self.assertIsInstance(v[2], int) + self.assertIsInstance(v[3], int) + self.assertIsInstance(v[4], str) + self.assertRaises(IndexError, operator.getitem, v, 5) + self.assertIsInstance(v.major, int) + self.assertIsInstance(v.minor, int) + self.assertIsInstance(v.build, int) + self.assertIsInstance(v.platform, int) + self.assertIsInstance(v.service_pack, str) + self.assertIsInstance(v.service_pack_minor, int) + self.assertIsInstance(v.service_pack_major, int) + self.assertIsInstance(v.suite_mask, int) + self.assertIsInstance(v.product_type, int) + self.assertEqual(v[0], v.major) + self.assertEqual(v[1], v.minor) + self.assertEqual(v[2], v.build) + self.assertEqual(v[3], v.platform) + self.assertEqual(v[4], v.service_pack) + + # This is how platform.py calls it. Make sure tuple + # still has 5 elements + maj, min, buildno, plat, csd = sys.getwindowsversion() + + def test_dlopenflags(self): + if hasattr(sys, "setdlopenflags"): + self.assertTrue(hasattr(sys, "getdlopenflags")) + self.assertRaises(TypeError, sys.getdlopenflags, 42) + oldflags = sys.getdlopenflags() + self.assertRaises(TypeError, sys.setdlopenflags) + sys.setdlopenflags(oldflags+1) + self.assertEqual(sys.getdlopenflags(), oldflags+1) + sys.setdlopenflags(oldflags) + + @test.test_support.impl_detail("reference counting") + def test_refcount(self): + # n here must be a global in order for this test to pass while + # tracing with a python function. Tracing calls PyFrame_FastToLocals + # which will add a copy of any locals to the frame object, causing + # the reference count to increase by 2 instead of 1. + global n + self.assertRaises(TypeError, sys.getrefcount) + c = sys.getrefcount(None) + n = None + self.assertEqual(sys.getrefcount(None), c+1) + del n + self.assertEqual(sys.getrefcount(None), c) + if hasattr(sys, "gettotalrefcount"): + self.assertIsInstance(sys.gettotalrefcount(), int) + + def test_getframe(self): + self.assertRaises(TypeError, sys._getframe, 42, 42) + self.assertRaises(ValueError, sys._getframe, 2000000000) + self.assertTrue( + SysModuleTest.test_getframe.im_func.func_code \ + is sys._getframe().f_code + ) + + @test.test_support.impl_detail("current_frames") + def test_current_frames(self): + have_threads = True + try: + import thread + except ImportError: + have_threads = False + + if have_threads: + self.current_frames_with_threads() + else: + self.current_frames_without_threads() + + # Test sys._current_frames() in a WITH_THREADS build. + @test.test_support.reap_threads + def current_frames_with_threads(self): + import threading, thread + import traceback + + # Spawn a thread that blocks at a known place. Then the main + # thread does sys._current_frames(), and verifies that the frames + # returned make sense. 
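# Illustrative sketch, not part of the original test (the helper name is made
# up): outside of this test, sys._current_frames() is typically used to dump
# the stack of every live thread, e.g. from a watchdog thread.  The
# blocking-thread setup described in the comment above starts right below.
def _dump_all_stacks(out=None):
    import traceback
    out = out or sys.stderr
    for ident, frame in sys._current_frames().items():
        out.write("Thread %d:\n" % ident)
        traceback.print_stack(frame, file=out)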
+ entered_g = threading.Event() + leave_g = threading.Event() + thread_info = [] # the thread's id + + def f123(): + g456() + + def g456(): + thread_info.append(thread.get_ident()) + entered_g.set() + leave_g.wait() + + t = threading.Thread(target=f123) + t.start() + entered_g.wait() + + # At this point, t has finished its entered_g.set(), although it's + # impossible to guess whether it's still on that line or has moved on + # to its leave_g.wait(). + self.assertEqual(len(thread_info), 1) + thread_id = thread_info[0] + + d = sys._current_frames() + + main_id = thread.get_ident() + self.assertIn(main_id, d) + self.assertIn(thread_id, d) + + # Verify that the captured main-thread frame is _this_ frame. + frame = d.pop(main_id) + self.assertTrue(frame is sys._getframe()) + + # Verify that the captured thread frame is blocked in g456, called + # from f123. This is a litte tricky, since various bits of + # threading.py are also in the thread's call stack. + frame = d.pop(thread_id) + stack = traceback.extract_stack(frame) + for i, (filename, lineno, funcname, sourceline) in enumerate(stack): + if funcname == "f123": + break + else: + self.fail("didn't find f123() on thread's call stack") + + self.assertEqual(sourceline, "g456()") + + # And the next record must be for g456(). + filename, lineno, funcname, sourceline = stack[i+1] + self.assertEqual(funcname, "g456") + self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"]) + + # Reap the spawned thread. + leave_g.set() + t.join() + + # Test sys._current_frames() when thread support doesn't exist. + def current_frames_without_threads(self): + # Not much happens here: there is only one thread, with artificial + # "thread id" 0. + d = sys._current_frames() + self.assertEqual(len(d), 1) + self.assertIn(0, d) + self.assertTrue(d[0] is sys._getframe()) + + def test_attributes(self): + self.assertIsInstance(sys.api_version, int) + self.assertIsInstance(sys.argv, list) + self.assertIn(sys.byteorder, ("little", "big")) + self.assertIsInstance(sys.builtin_module_names, tuple) + self.assertIsInstance(sys.copyright, basestring) + self.assertIsInstance(sys.exec_prefix, basestring) + self.assertIsInstance(sys.executable, basestring) + self.assertEqual(len(sys.float_info), 11) + self.assertEqual(sys.float_info.radix, 2) + self.assertEqual(len(sys.long_info), 2) + self.assertTrue(sys.long_info.bits_per_digit % 5 == 0) + self.assertTrue(sys.long_info.sizeof_digit >= 1) + self.assertEqual(type(sys.long_info.bits_per_digit), int) + self.assertEqual(type(sys.long_info.sizeof_digit), int) + self.assertIsInstance(sys.hexversion, int) + self.assertIsInstance(sys.maxint, int) + if test.test_support.have_unicode: + self.assertIsInstance(sys.maxunicode, int) + self.assertIsInstance(sys.platform, basestring) + self.assertIsInstance(sys.prefix, basestring) + self.assertIsInstance(sys.version, basestring) + vi = sys.version_info + self.assertIsInstance(vi[:], tuple) + self.assertEqual(len(vi), 5) + self.assertIsInstance(vi[0], int) + self.assertIsInstance(vi[1], int) + self.assertIsInstance(vi[2], int) + self.assertIn(vi[3], ("alpha", "beta", "candidate", "final")) + self.assertIsInstance(vi[4], int) + self.assertIsInstance(vi.major, int) + self.assertIsInstance(vi.minor, int) + self.assertIsInstance(vi.micro, int) + self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final")) + self.assertIsInstance(vi.serial, int) + self.assertEqual(vi[0], vi.major) + self.assertEqual(vi[1], vi.minor) + self.assertEqual(vi[2], vi.micro) + self.assertEqual(vi[3], 
vi.releaselevel) + self.assertEqual(vi[4], vi.serial) + self.assertTrue(vi > (1,0,0)) + self.assertIsInstance(sys.float_repr_style, str) + self.assertIn(sys.float_repr_style, ('short', 'legacy')) + + def test_43581(self): + # Can't use sys.stdout, as this is a cStringIO object when + # the test runs under regrtest. + self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding) + + def test_sys_flags(self): + self.assertTrue(sys.flags) + attrs = ("debug", "py3k_warning", "division_warning", "division_new", + "inspect", "interactive", "optimize", "dont_write_bytecode", + "no_site", "ignore_environment", "tabcheck", "verbose", + "unicode", "bytes_warning") + for attr in attrs: + self.assertTrue(hasattr(sys.flags, attr), attr) + self.assertEqual(type(getattr(sys.flags, attr)), int, attr) + self.assertTrue(repr(sys.flags)) + + def test_clear_type_cache(self): + sys._clear_type_cache() + + def test_ioencoding(self): + import subprocess + env = dict(os.environ) + + # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424, + # not representable in ASCII. + + env["PYTHONIOENCODING"] = "cp424" + p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'], + stdout = subprocess.PIPE, env=env) + out = p.communicate()[0].strip() + self.assertEqual(out, unichr(0xa2).encode("cp424")) + + env["PYTHONIOENCODING"] = "ascii:replace" + p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'], + stdout = subprocess.PIPE, env=env) + out = p.communicate()[0].strip() + self.assertEqual(out, '?') + + def test_call_tracing(self): + self.assertEqual(sys.call_tracing(str, (2,)), "2") + self.assertRaises(TypeError, sys.call_tracing, str, 2) + + def test_executable(self): + # Issue #7774: Ensure that sys.executable is an empty string if argv[0] + # has been set to an non existent program name and Python is unable to + # retrieve the real program name + import subprocess + # For a normal installation, it should work without 'cwd' + # argument. For test runs in the build directory, see #7774. 
+ python_dir = os.path.dirname(os.path.realpath(sys.executable)) + p = subprocess.Popen( + ["nonexistent", "-c", 'import sys; print repr(sys.executable)'], + executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir) + executable = p.communicate()[0].strip() + p.wait() + self.assertIn(executable, ["''", repr(sys.executable)]) + +class SizeofTest(unittest.TestCase): + + TPFLAGS_HAVE_GC = 1<<14 + TPFLAGS_HEAPTYPE = 1L<<9 + + def setUp(self): + self.c = len(struct.pack('c', ' ')) + self.H = len(struct.pack('H', 0)) + self.i = len(struct.pack('i', 0)) + self.l = len(struct.pack('l', 0)) + self.P = len(struct.pack('P', 0)) + # due to missing size_t information from struct, it is assumed that + # sizeof(Py_ssize_t) = sizeof(void*) + self.header = 'PP' + self.vheader = self.header + 'P' + if hasattr(sys, "gettotalrefcount"): + self.header += '2P' + self.vheader += '2P' + self.longdigit = sys.long_info.sizeof_digit + import _testcapi + self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD + self.file = open(test.test_support.TESTFN, 'wb') + + def tearDown(self): + self.file.close() + test.test_support.unlink(test.test_support.TESTFN) + + def check_sizeof(self, o, size): + result = sys.getsizeof(o) + if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\ + ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))): + size += self.gc_headsize + msg = 'wrong size for %s: got %d, expected %d' \ + % (type(o), result, size) + self.assertEqual(result, size, msg) + + def calcsize(self, fmt): + """Wrapper around struct.calcsize which enforces the alignment of the + end of a structure to the alignment requirement of pointer. + + Note: This wrapper should only be used if a pointer member is included + and no member with a size larger than a pointer exists. + """ + return struct.calcsize(fmt + '0P') + + def test_gc_head_size(self): + # Check that the gc header size is added to objects tracked by the gc. 
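# Illustrative aside, not part of the original test: gc.is_tracked() (new in
# Python 2.7) shows directly which objects carry the GC header whose size is
# accounted for here; bools are not tracked, lists are.  The size assertions
# using self.header / self.calcsize follow right below.
import gc
assert not gc.is_tracked(True)   # no GC header added to sys.getsizeof(True)
assert gc.is_tracked([])         # GC header included for sys.getsizeof([])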
+ h = self.header + size = self.calcsize + gc_header_size = self.gc_headsize + # bool objects are not gc tracked + self.assertEqual(sys.getsizeof(True), size(h + 'l')) + # but lists are + self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size) + + def test_default(self): + h = self.header + size = self.calcsize + self.assertEqual(sys.getsizeof(True, -1), size(h + 'l')) + + def test_objecttypes(self): + # check all types defined in Objects/ + h = self.header + vh = self.vheader + size = self.calcsize + check = self.check_sizeof + # bool + check(True, size(h + 'l')) + # buffer + with test.test_support.check_py3k_warnings(): + check(buffer(''), size(h + '2P2Pil')) + # builtin_function_or_method + check(len, size(h + '3P')) + # bytearray + samples = ['', 'u'*100000] + for sample in samples: + x = bytearray(sample) + check(x, size(vh + 'iPP') + x.__alloc__() * self.c) + # bytearray_iterator + check(iter(bytearray()), size(h + 'PP')) + # cell + def get_cell(): + x = 42 + def inner(): + return x + return inner + check(get_cell().func_closure[0], size(h + 'P')) + # classobj (old-style class) + class class_oldstyle(): + def method(): + pass + check(class_oldstyle, size(h + '7P')) + # instance (old-style class) + check(class_oldstyle(), size(h + '3P')) + # instancemethod (old-style class) + check(class_oldstyle().method, size(h + '4P')) + # complex + check(complex(0,1), size(h + '2d')) + # code + check(get_cell().func_code, size(h + '4i8Pi3P')) + # BaseException + check(BaseException(), size(h + '3P')) + # UnicodeEncodeError + check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP')) + # UnicodeDecodeError + check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP')) + # UnicodeTranslateError + check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP')) + # method_descriptor (descriptor object) + check(str.lower, size(h + '2PP')) + # classmethod_descriptor (descriptor object) + # XXX + # member_descriptor (descriptor object) + import datetime + check(datetime.timedelta.days, size(h + '2PP')) + # getset_descriptor (descriptor object) + import __builtin__ + check(__builtin__.file.closed, size(h + '2PP')) + # wrapper_descriptor (descriptor object) + check(int.__add__, size(h + '2P2P')) + # dictproxy + class C(object): pass + check(C.__dict__, size(h + 'P')) + # method-wrapper (descriptor object) + check({}.__iter__, size(h + '2P')) + # dict + check({}, size(h + '3P2P' + 8*'P2P')) + x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8} + check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P')) + # dictionary-keyiterator + check({}.iterkeys(), size(h + 'P2PPP')) + # dictionary-valueiterator + check({}.itervalues(), size(h + 'P2PPP')) + # dictionary-itemiterator + check({}.iteritems(), size(h + 'P2PPP')) + # ellipses + check(Ellipsis, size(h + '')) + # EncodingMap + import codecs, encodings.iso8859_3 + x = codecs.charmap_build(encodings.iso8859_3.decoding_table) + check(x, size(h + '32B2iB')) + # enumerate + check(enumerate([]), size(h + 'l3P')) + # file + check(self.file, size(h + '4P2i4P3i3P3i')) + # float + check(float(0), size(h + 'd')) + # sys.floatinfo + check(sys.float_info, size(vh) + self.P * len(sys.float_info)) + # frame + import inspect + CO_MAXBLOCKS = 20 + x = inspect.currentframe() + ncells = len(x.f_code.co_cellvars) + nfrees = len(x.f_code.co_freevars) + extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\ + ncells + nfrees - 1 + check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P')) + # function + def func(): pass + check(func, size(h + '9P')) + class c(): 
+ @staticmethod + def foo(): + pass + @classmethod + def bar(cls): + pass + # staticmethod + check(foo, size(h + 'P')) + # classmethod + check(bar, size(h + 'P')) + # generator + def get_gen(): yield 1 + check(get_gen(), size(h + 'Pi2P')) + # integer + check(1, size(h + 'l')) + check(100, size(h + 'l')) + # iterator + check(iter('abc'), size(h + 'lP')) + # callable-iterator + import re + check(re.finditer('',''), size(h + '2P')) + # list + samples = [[], [1,2,3], ['1', '2', '3']] + for sample in samples: + check(sample, size(vh + 'PP') + len(sample)*self.P) + # sortwrapper (list) + # XXX + # cmpwrapper (list) + # XXX + # listiterator (list) + check(iter([]), size(h + 'lP')) + # listreverseiterator (list) + check(reversed([]), size(h + 'lP')) + # long + check(0L, size(vh)) + check(1L, size(vh) + self.longdigit) + check(-1L, size(vh) + self.longdigit) + PyLong_BASE = 2**sys.long_info.bits_per_digit + check(long(PyLong_BASE), size(vh) + 2*self.longdigit) + check(long(PyLong_BASE**2-1), size(vh) + 2*self.longdigit) + check(long(PyLong_BASE**2), size(vh) + 3*self.longdigit) + # module + check(unittest, size(h + 'P')) + # None + check(None, size(h + '')) + # object + check(object(), size(h + '')) + # property (descriptor object) + class C(object): + def getx(self): return self.__x + def setx(self, value): self.__x = value + def delx(self): del self.__x + x = property(getx, setx, delx, "") + check(x, size(h + '4Pi')) + # PyCObject + # PyCapsule + # XXX + # rangeiterator + check(iter(xrange(1)), size(h + '4l')) + # reverse + check(reversed(''), size(h + 'PP')) + # set + # frozenset + PySet_MINSIZE = 8 + samples = [[], range(10), range(50)] + s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP') + for sample in samples: + minused = len(sample) + if minused == 0: tmp = 1 + # the computation of minused is actually a bit more complicated + # but this suffices for the sizeof test + minused = minused*2 + newsize = PySet_MINSIZE + while newsize <= minused: + newsize = newsize << 1 + if newsize <= 8: + check(set(sample), s) + check(frozenset(sample), s) + else: + check(set(sample), s + newsize*struct.calcsize('lP')) + check(frozenset(sample), s + newsize*struct.calcsize('lP')) + # setiterator + check(iter(set()), size(h + 'P3P')) + # slice + check(slice(1), size(h + '3P')) + # str + check('', struct.calcsize(vh + 'li') + 1) + check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c) + # super + check(super(int), size(h + '3P')) + # tuple + check((), size(vh)) + check((1,2,3), size(vh) + 3*self.P) + # tupleiterator + check(iter(()), size(h + 'lP')) + # type + # (PyTypeObject + PyNumberMethods + PyMappingMethods + + # PySequenceMethods + PyBufferProcs) + s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P') + class newstyleclass(object): + pass + check(newstyleclass, s) + # builtin type + check(int, s) + # NotImplementedType + import types + check(types.NotImplementedType, s) + # unicode + usize = len(u'\0'.encode('unicode-internal')) + samples = [u'', u'1'*100] + # we need to test for both sizes, because we don't know if the string + # has been cached + for s in samples: + check(s, size(h + 'PPlP') + usize * (len(s) + 1)) + # weakref + import weakref + check(weakref.ref(int), size(h + '2Pl2P')) + # weakproxy + # XXX + # weakcallableproxy + check(weakref.proxy(int), size(h + '2Pl2P')) + # xrange + check(xrange(1), size(h + '3l')) + check(xrange(66000), size(h + '3l')) + + def test_pythontypes(self): + # check all types defined in Python/ + h = self.header + vh = self.vheader + size = self.calcsize + check = 
self.check_sizeof + # _ast.AST + import _ast + check(_ast.AST(), size(h + '')) + # imp.NullImporter + import imp + check(imp.NullImporter(self.file.name), size(h + '')) + try: + raise TypeError + except TypeError: + tb = sys.exc_info()[2] + # traceback + if tb != None: + check(tb, size(h + '2P2i')) + # symtable entry + # XXX + # sys.flags + check(sys.flags, size(vh) + self.P * len(sys.flags)) + + +def test_main(): + test_classes = (SysModuleTest, SizeofTest) + + test.test_support.run_unittest(*test_classes) + +if __name__ == "__main__": + test_main() From afa at codespeak.net Sun Sep 12 22:11:52 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 12 Sep 2010 22:11:52 +0200 (CEST) Subject: [pypy-svn] r77029 - pypy/branch/fast-forward/pypy/module/posix Message-ID: <20100912201152.033E3282B90@codespeak.net> Author: afa Date: Sun Sep 12 22:11:51 2010 New Revision: 77029 Modified: pypy/branch/fast-forward/pypy/module/posix/app_posix.py Log: For the moment, on Windows we disable the validation of a file descriptor in os.fdopen(). This needs more thinking, and "happy nonsensical" code like http://paste.pocoo.org/show/259546/ Modified: pypy/branch/fast-forward/pypy/module/posix/app_posix.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/posix/app_posix.py (original) +++ pypy/branch/fast-forward/pypy/module/posix/app_posix.py Sun Sep 12 22:11:51 2010 @@ -64,6 +64,18 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] +if osname == 'posix': + def _validate_fd(fd): + import fcntl + try: + fcntl.fcntl(fd, fcntl.F_GETFD) + except IOError, e: + raise OSError(e.errno, e.strerror, e.filename) +else: + def _validate_fd(fd): + # XXX for the moment + return + # Capture file.fdopen at import time, as some code replaces # __builtins__.file with a custom function. _fdopen = file.fdopen @@ -72,12 +84,7 @@ """fdopen(fd [, mode='r' [, buffering]]) -> file_object Return an open file object connected to a file descriptor.""" - # Validate fd - import fcntl - try: - fcntl.fcntl(fd, fcntl.F_GETFD) - except IOError, e: - raise OSError(e.errno, e.strerror, e.filename) + _validate_fd(fd) return _fdopen(fd, mode, buffering) From afa at codespeak.net Mon Sep 13 00:59:14 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 00:59:14 +0200 (CEST) Subject: [pypy-svn] r77030 - pypy/branch/fast-forward/lib-python/modified-2.7.0/test Message-ID: <20100912225914.6F567282B90@codespeak.net> Author: afa Date: Mon Sep 13 00:59:12 2010 New Revision: 77030 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py (contents, props changed) pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_support.py (contents, props changed) Log: Let the modified tests run instead of the initial ones Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py Mon Sep 13 00:59:12 2010 @@ -0,0 +1 @@ +# Dummy file to make this directory a package. 
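A rough sketch of the mechanism this package relies on (the helper name and
paths below are hypothetical, not part of the patch): making
"import test.test_support" resolve to the patched copies is a matter of
searching the modified-2.7.0 tree before the stock 2.7.0 tree on sys.path.

import os
import sys

def _prefer_modified_stdlib_tests(pypy_root):
    # Hypothetical helper: let the patched test package shadow the original
    # one by putting lib-python/modified-2.7.0 first on the search path.
    modified = os.path.join(pypy_root, 'lib-python', 'modified-2.7.0')
    original = os.path.join(pypy_root, 'lib-python', '2.7.0')
    sys.path[:0] = [modified, original]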
Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_support.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_support.py Mon Sep 13 00:59:12 2010 @@ -0,0 +1,1201 @@ +"""Supporting definitions for the Python regression tests.""" + +if __name__ != 'test.test_support': + raise ImportError('test_support must be imported from the test package') + +import contextlib +import errno +import functools +import gc +import socket +import sys +import os +import platform +import shutil +import warnings +import unittest +import importlib +import UserDict +import re +import time +try: + import thread +except ImportError: + thread = None + +__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module", + "verbose", "use_resources", "max_memuse", "record_original_stdout", + "get_original_stdout", "unload", "unlink", "rmtree", "forget", + "is_resource_enabled", "requires", "find_unused_port", "bind_port", + "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ", + "SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error", + "open_urlresource", "check_warnings", "check_py3k_warnings", + "CleanImport", "EnvironmentVarGuard", "captured_output", + "captured_stdout", "TransientResource", "transient_internet", + "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest", + "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup", + "threading_cleanup", "reap_children", "cpython_only", + "check_impl_detail", "get_attribute", "py3k_bytes"] + + +class Error(Exception): + """Base class for regression test exceptions.""" + +class TestFailed(Error): + """Test failed.""" + +class ResourceDenied(unittest.SkipTest): + """Test skipped because it requested a disallowed resource. + + This is raised when a test calls requires() for a resource that + has not been enabled. It is used to distinguish between expected + and unexpected skips. + """ + + at contextlib.contextmanager +def _ignore_deprecated_imports(ignore=True): + """Context manager to suppress package and module deprecation + warnings when importing them. + + If ignore is False, this context manager has no effect.""" + if ignore: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", ".+ (module|package)", + DeprecationWarning) + yield + else: + yield + + +def import_module(name, deprecated=False): + """Import and return the module to be tested, raising SkipTest if + it is not available. 
+ + If deprecated is True, any module or package deprecation messages + will be suppressed.""" + with _ignore_deprecated_imports(deprecated): + try: + return importlib.import_module(name) + except ImportError, msg: + raise unittest.SkipTest(str(msg)) + + +def _save_and_remove_module(name, orig_modules): + """Helper function to save and remove a module from sys.modules + + Return value is True if the module was in sys.modules and + False otherwise.""" + saved = True + try: + orig_modules[name] = sys.modules[name] + except KeyError: + saved = False + else: + del sys.modules[name] + return saved + + +def _save_and_block_module(name, orig_modules): + """Helper function to save and block a module in sys.modules + + Return value is True if the module was in sys.modules and + False otherwise.""" + saved = True + try: + orig_modules[name] = sys.modules[name] + except KeyError: + saved = False + sys.modules[name] = None + return saved + + +def import_fresh_module(name, fresh=(), blocked=(), deprecated=False): + """Imports and returns a module, deliberately bypassing the sys.modules cache + and importing a fresh copy of the module. Once the import is complete, + the sys.modules cache is restored to its original state. + + Modules named in fresh are also imported anew if needed by the import. + + Importing of modules named in blocked is prevented while the fresh import + takes place. + + If deprecated is True, any module or package deprecation messages + will be suppressed.""" + # NOTE: test_heapq and test_warnings include extra sanity checks to make + # sure that this utility function is working as expected + with _ignore_deprecated_imports(deprecated): + # Keep track of modules saved for later restoration as well + # as those which just need a blocking entry removed + orig_modules = {} + names_to_remove = [] + _save_and_remove_module(name, orig_modules) + try: + for fresh_name in fresh: + _save_and_remove_module(fresh_name, orig_modules) + for blocked_name in blocked: + if not _save_and_block_module(blocked_name, orig_modules): + names_to_remove.append(blocked_name) + fresh_module = importlib.import_module(name) + finally: + for orig_name, module in orig_modules.items(): + sys.modules[orig_name] = module + for name_to_remove in names_to_remove: + del sys.modules[name_to_remove] + return fresh_module + + +def get_attribute(obj, name): + """Get an attribute, raising SkipTest if AttributeError is raised.""" + try: + attribute = getattr(obj, name) + except AttributeError: + raise unittest.SkipTest("module %s has no attribute %s" % ( + obj.__name__, name)) + else: + return attribute + + +verbose = 1 # Flag set to 0 by regrtest.py +use_resources = None # Flag set to [] by regrtest.py +max_memuse = 0 # Disable bigmem tests (they will still be run with + # small sizes, to make sure they work.) +real_max_memuse = 0 + +# _original_stdout is meant to hold stdout at the time regrtest began. +# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever. +# The point is to have some flavor of stdout the user can actually see. +_original_stdout = None +def record_original_stdout(stdout): + global _original_stdout + _original_stdout = stdout + +def get_original_stdout(): + return _original_stdout or sys.stdout + +def unload(name): + try: + del sys.modules[name] + except KeyError: + pass + +def unlink(filename): + try: + os.unlink(filename) + except OSError: + pass + +def rmtree(path): + try: + shutil.rmtree(path) + except OSError, e: + # Unix returns ENOENT, Windows returns ESRCH. 
+ if e.errno not in (errno.ENOENT, errno.ESRCH): + raise + +def forget(modname): + '''"Forget" a module was ever imported by removing it from sys.modules and + deleting any .pyc and .pyo files.''' + unload(modname) + for dirname in sys.path: + unlink(os.path.join(dirname, modname + os.extsep + 'pyc')) + # Deleting the .pyo file cannot be within the 'try' for the .pyc since + # the chance exists that there is no .pyc (and thus the 'try' statement + # is exited) but there is a .pyo file. + unlink(os.path.join(dirname, modname + os.extsep + 'pyo')) + +def is_resource_enabled(resource): + """Test whether a resource is enabled. Known resources are set by + regrtest.py.""" + return use_resources is not None and resource in use_resources + +def requires(resource, msg=None): + """Raise ResourceDenied if the specified resource is not available. + + If the caller's module is __main__ then automatically return True. The + possibility of False being returned occurs when regrtest.py is executing.""" + # see if the caller's module is __main__ - if so, treat as if + # the resource was set + if sys._getframe(1).f_globals.get("__name__") == "__main__": + return + if not is_resource_enabled(resource): + if msg is None: + msg = "Use of the `%s' resource not enabled" % resource + raise ResourceDenied(msg) + +HOST = 'localhost' + +def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM): + """Returns an unused port that should be suitable for binding. This is + achieved by creating a temporary socket with the same family and type as + the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to + the specified host address (defaults to 0.0.0.0) with the port set to 0, + eliciting an unused ephemeral port from the OS. The temporary socket is + then closed and deleted, and the ephemeral port is returned. + + Either this method or bind_port() should be used for any tests where a + server socket needs to be bound to a particular port for the duration of + the test. Which one to use depends on whether the calling code is creating + a python socket, or if an unused port needs to be provided in a constructor + or passed to an external program (i.e. the -accept argument to openssl's + s_server mode). Always prefer bind_port() over find_unused_port() where + possible. Hard coded ports should *NEVER* be used. As soon as a server + socket is bound to a hard coded port, the ability to run multiple instances + of the test simultaneously on the same host is compromised, which makes the + test a ticking time bomb in a buildbot environment. On Unix buildbots, this + may simply manifest as a failed test, which can be recovered from without + intervention in most cases, but on Windows, the entire python process can + completely and utterly wedge, requiring someone to log in to the buildbot + and manually kill the affected process. + + (This is easy to reproduce on Windows, unfortunately, and can be traced to + the SO_REUSEADDR socket option having different semantics on Windows versus + Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind, + listen and then accept connections on identical host/ports. An EADDRINUSE + socket.error will be raised at some point (depending on the platform and + the order bind and listen were called on each socket). + + However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE + will ever be raised when attempting to bind two identical host/ports. 
When + accept() is called on each socket, the second caller's process will steal + the port from the first caller, leaving them both in an awkwardly wedged + state where they'll no longer respond to any signals or graceful kills, and + must be forcibly killed via OpenProcess()/TerminateProcess(). + + The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option + instead of SO_REUSEADDR, which effectively affords the same semantics as + SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open + Source world compared to Windows ones, this is a common mistake. A quick + look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when + openssl.exe is called with the 's_server' option, for example. See + http://bugs.python.org/issue2550 for more info. The following site also + has a very thorough description about the implications of both REUSEADDR + and EXCLUSIVEADDRUSE on Windows: + http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx) + + XXX: although this approach is a vast improvement on previous attempts to + elicit unused ports, it rests heavily on the assumption that the ephemeral + port returned to us by the OS won't immediately be dished back out to some + other process when we close and delete our temporary socket but before our + calling code has a chance to bind the returned port. We can deal with this + issue if/when we come across it.""" + tempsock = socket.socket(family, socktype) + port = bind_port(tempsock) + tempsock.close() + del tempsock + return port + +def bind_port(sock, host=HOST): + """Bind the socket to a free port and return the port number. Relies on + ephemeral ports in order to ensure we are using an unbound port. This is + important as many tests may be running simultaneously, especially in a + buildbot environment. This method raises an exception if the sock.family + is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR + or SO_REUSEPORT set on it. Tests should *never* set these socket options + for TCP/IP sockets. The only case for setting these options is testing + multicasting via multiple UDP sockets. + + Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. + on Windows), it will be set on the socket. This will prevent anyone else + from bind()'ing to our host/port for the duration of the test. 
+ """ + if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM: + if hasattr(socket, 'SO_REUSEADDR'): + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1: + raise TestFailed("tests should never set the SO_REUSEADDR " \ + "socket option on TCP/IP sockets!") + if hasattr(socket, 'SO_REUSEPORT'): + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1: + raise TestFailed("tests should never set the SO_REUSEPORT " \ + "socket option on TCP/IP sockets!") + if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + + sock.bind((host, 0)) + port = sock.getsockname()[1] + return port + +FUZZ = 1e-6 + +def fcmp(x, y): # fuzzy comparison function + if isinstance(x, float) or isinstance(y, float): + try: + fuzz = (abs(x) + abs(y)) * FUZZ + if abs(x-y) <= fuzz: + return 0 + except: + pass + elif type(x) == type(y) and isinstance(x, (tuple, list)): + for i in range(min(len(x), len(y))): + outcome = fcmp(x[i], y[i]) + if outcome != 0: + return outcome + return (len(x) > len(y)) - (len(x) < len(y)) + return (x > y) - (x < y) + +try: + unicode + have_unicode = True +except NameError: + have_unicode = False + +is_jython = sys.platform.startswith('java') + +# Filename used for testing +if os.name == 'java': + # Jython disallows @ in module names + TESTFN = '$test' +elif os.name == 'riscos': + TESTFN = 'testfile' +else: + TESTFN = '@test' + # Unicode name only used if TEST_FN_ENCODING exists for the platform. + if have_unicode: + # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding() + # TESTFN_UNICODE is a filename that can be encoded using the + # file system encoding, but *not* with the default (ascii) encoding + if isinstance('', unicode): + # python -U + # XXX perhaps unicode() should accept Unicode strings? + TESTFN_UNICODE = "@test-\xe0\xf2" + else: + # 2 latin characters. + TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1") + TESTFN_ENCODING = sys.getfilesystemencoding() + # TESTFN_UNENCODABLE is a filename that should *not* be + # able to be encoded by *either* the default or filesystem encoding. + # This test really only makes sense on Windows NT platforms + # which have special Unicode support in posixmodule. + if (not hasattr(sys, "getwindowsversion") or + sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME + TESTFN_UNENCODABLE = None + else: + # Japanese characters (I think - from bug 846133) + TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"') + try: + # XXX - Note - should be using TESTFN_ENCODING here - but for + # Windows, "mbcs" currently always operates as if in + # errors=ignore' mode - hence we get '?' characters rather than + # the exception. 'Latin1' operates as we expect - ie, fails. + # See [ 850997 ] mbcs encoding ignores errors + TESTFN_UNENCODABLE.encode("Latin1") + except UnicodeEncodeError: + pass + else: + print \ + 'WARNING: The filename %r CAN be encoded by the filesystem. ' \ + 'Unicode filename tests may not be effective' \ + % TESTFN_UNENCODABLE + + +# Disambiguate TESTFN for parallel testing, while letting it remain a valid +# module name. +TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid()) + +# Save the initial cwd +SAVEDCWD = os.getcwd() + + at contextlib.contextmanager +def temp_cwd(name='tempcwd', quiet=False): + """ + Context manager that creates a temporary directory and set it as CWD. + + The new CWD is created in the current directory and it's named *name*. 
+ If *quiet* is False (default) and it's not possible to create or change + the CWD, an error is raised. If it's True, only a warning is raised + and the original CWD is used. + """ + if isinstance(name, unicode): + try: + name = name.encode(sys.getfilesystemencoding() or 'ascii') + except UnicodeEncodeError: + if not quiet: + raise unittest.SkipTest('unable to encode the cwd name with ' + 'the filesystem encoding.') + saved_dir = os.getcwd() + is_temporary = False + try: + os.mkdir(name) + os.chdir(name) + is_temporary = True + except OSError: + if not quiet: + raise + warnings.warn('tests may fail, unable to change the CWD to ' + name, + RuntimeWarning, stacklevel=3) + try: + yield os.getcwd() + finally: + os.chdir(saved_dir) + if is_temporary: + rmtree(name) + + +def findfile(file, here=__file__, subdir=None): + """Try to find a file on sys.path and the working directory. If it is not + found the argument passed to the function is returned (this does not + necessarily signal failure; could still be the legitimate path).""" + if os.path.isabs(file): + return file + if subdir is not None: + file = os.path.join(subdir, file) + path = sys.path + path = [os.path.dirname(here)] + path + for dn in path: + fn = os.path.join(dn, file) + if os.path.exists(fn): return fn + return file + +def sortdict(dict): + "Like repr(dict), but in sorted order." + items = dict.items() + items.sort() + reprpairs = ["%r: %r" % pair for pair in items] + withcommas = ", ".join(reprpairs) + return "{%s}" % withcommas + +def make_bad_fd(): + """ + Create an invalid file descriptor by opening and closing a file and return + its fd. + """ + file = open(TESTFN, "wb") + try: + return file.fileno() + finally: + file.close() + unlink(TESTFN) + +def check_syntax_error(testcase, statement): + testcase.assertRaises(SyntaxError, compile, statement, + '', 'exec') + +def open_urlresource(url, check=None): + import urlparse, urllib2 + + filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL! + + fn = os.path.join(os.path.dirname(__file__), "data", filename) + + def check_valid_file(fn): + f = open(fn) + if check is None: + return f + elif check(f): + f.seek(0) + return f + f.close() + + if os.path.exists(fn): + f = check_valid_file(fn) + if f is not None: + return f + unlink(fn) + + # Verify the requirement before downloading the file + requires('urlfetch') + + print >> get_original_stdout(), '\tfetching %s ...' % url + f = urllib2.urlopen(url, timeout=15) + try: + with open(fn, "wb") as out: + s = f.read() + while s: + out.write(s) + s = f.read() + finally: + f.close() + + f = check_valid_file(fn) + if f is not None: + return f + raise TestFailed('invalid resource "%s"' % fn) + + +class WarningsRecorder(object): + """Convenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. + """ + def __init__(self, warnings_list): + self._warnings = warnings_list + self._last = 0 + + def __getattr__(self, attr): + if len(self._warnings) > self._last: + return getattr(self._warnings[-1], attr) + elif attr in warnings.WarningMessage._WARNING_DETAILS: + return None + raise AttributeError("%r has no attribute %r" % (self, attr)) + + @property + def warnings(self): + return self._warnings[self._last:] + + def reset(self): + self._last = len(self._warnings) + + +def _filterwarnings(filters, quiet=False): + """Catch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. 
+ """ + # Clear the warning registry of the calling module + # in order to re-raise the warnings. + frame = sys._getframe(2) + registry = frame.f_globals.get('__warningregistry__') + if registry: + registry.clear() + with warnings.catch_warnings(record=True) as w: + # Set filter "always" to record all warnings. Because + # test_warnings swap the module, we need to look up in + # the sys.modules dictionary. + sys.modules['warnings'].simplefilter("always") + yield WarningsRecorder(w) + # Filter the recorded warnings + reraise = [warning.message for warning in w] + missing = [] + for msg, cat in filters: + seen = False + for exc in reraise[:]: + message = str(exc) + # Filter out the matching messages + if (re.match(msg, message, re.I) and + issubclass(exc.__class__, cat)): + seen = True + reraise.remove(exc) + if not seen and not quiet: + # This filter caught nothing + missing.append((msg, cat.__name__)) + if reraise: + raise AssertionError("unhandled warning %r" % reraise[0]) + if missing: + raise AssertionError("filter (%r, %s) did not catch any warning" % + missing[0]) + + + at contextlib.contextmanager +def check_warnings(*filters, **kwargs): + """Context manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + """ + quiet = kwargs.get('quiet') + if not filters: + filters = (("", Warning),) + # Preserve backward compatibility + if quiet is None: + quiet = True + return _filterwarnings(filters, quiet) + + + at contextlib.contextmanager +def check_py3k_warnings(*filters, **kwargs): + """Context manager to silence py3k warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default False) + + Without argument, it defaults to: + check_py3k_warnings(("", DeprecationWarning), quiet=False) + """ + if sys.py3kwarning: + if not filters: + filters = (("", DeprecationWarning),) + else: + # It should not raise any py3k warning + filters = () + return _filterwarnings(filters, kwargs.get('quiet')) + + +class CleanImport(object): + """Context manager to force import to return a new module reference. + + This is useful for testing module-level behaviours, such as + the emission of a DeprecationWarning on import. + + Use like this: + + with CleanImport("foo"): + importlib.import_module("foo") # new reference + """ + + def __init__(self, *module_names): + self.original_modules = sys.modules.copy() + for module_name in module_names: + if module_name in sys.modules: + module = sys.modules[module_name] + # It is possible that module_name is just an alias for + # another module (e.g. stub for modules renamed in 3.x). + # In that case, we also need delete the real module to clear + # the import cache. + if module.__name__ != module_name: + del sys.modules[module.__name__] + del sys.modules[module_name] + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + sys.modules.update(self.original_modules) + + +class EnvironmentVarGuard(UserDict.DictMixin): + + """Class to help protect the environment variable properly. 
Can be used as + a context manager.""" + + def __init__(self): + self._environ = os.environ + self._changed = {} + + def __getitem__(self, envvar): + return self._environ[envvar] + + def __setitem__(self, envvar, value): + # Remember the initial value on the first access + if envvar not in self._changed: + self._changed[envvar] = self._environ.get(envvar) + self._environ[envvar] = value + + def __delitem__(self, envvar): + # Remember the initial value on the first access + if envvar not in self._changed: + self._changed[envvar] = self._environ.get(envvar) + if envvar in self._environ: + del self._environ[envvar] + + def keys(self): + return self._environ.keys() + + def set(self, envvar, value): + self[envvar] = value + + def unset(self, envvar): + del self[envvar] + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + for (k, v) in self._changed.items(): + if v is None: + if k in self._environ: + del self._environ[k] + else: + self._environ[k] = v + os.environ = self._environ + + +class DirsOnSysPath(object): + """Context manager to temporarily add directories to sys.path. + + This makes a copy of sys.path, appends any directories given + as positional arguments, then reverts sys.path to the copied + settings when the context ends. + + Note that *all* sys.path modifications in the body of the + context manager, including replacement of the object, + will be reverted at the end of the block. + """ + + def __init__(self, *paths): + self.original_value = sys.path[:] + self.original_object = sys.path + sys.path.extend(paths) + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + sys.path = self.original_object + sys.path[:] = self.original_value + + +class TransientResource(object): + + """Raise ResourceDenied if an exception is raised while the context manager + is in effect that matches the specified exception and attributes.""" + + def __init__(self, exc, **kwargs): + self.exc = exc + self.attrs = kwargs + + def __enter__(self): + return self + + def __exit__(self, type_=None, value=None, traceback=None): + """If type_ is a subclass of self.exc and value has attributes matching + self.attrs, raise ResourceDenied. 
Otherwise let the exception + propagate (if any).""" + if type_ is not None and issubclass(self.exc, type_): + for attr, attr_value in self.attrs.iteritems(): + if not hasattr(value, attr): + break + if getattr(value, attr) != attr_value: + break + else: + raise ResourceDenied("an optional resource is not available") + + + at contextlib.contextmanager +def transient_internet(resource_name, timeout=30.0, errnos=()): + """Return a context manager that raises ResourceDenied when various issues + with the Internet connection manifest themselves as exceptions.""" + default_errnos = [ + ('ECONNREFUSED', 111), + ('ECONNRESET', 104), + ('ENETUNREACH', 101), + ('ETIMEDOUT', 110), + ] + default_gai_errnos = [ + ('EAI_NONAME', -2), + ('EAI_NODATA', -5), + ] + + denied = ResourceDenied("Resource '%s' is not available" % resource_name) + captured_errnos = errnos + gai_errnos = [] + if not captured_errnos: + captured_errnos = [getattr(errno, name, num) + for (name, num) in default_errnos] + gai_errnos = [getattr(socket, name, num) + for (name, num) in default_gai_errnos] + + def filter_error(err): + n = getattr(err, 'errno', None) + if (isinstance(err, socket.timeout) or + (isinstance(err, socket.gaierror) and n in gai_errnos) or + n in captured_errnos): + if not verbose: + sys.stderr.write(denied.args[0] + "\n") + raise denied + + old_timeout = socket.getdefaulttimeout() + try: + if timeout is not None: + socket.setdefaulttimeout(timeout) + yield + except IOError as err: + # urllib can wrap original socket errors multiple times (!), we must + # unwrap to get at the original error. + while True: + a = err.args + if len(a) >= 1 and isinstance(a[0], IOError): + err = a[0] + # The error can also be wrapped as args[1]: + # except socket.error as msg: + # raise IOError('socket error', msg).with_traceback(sys.exc_info()[2]) + elif len(a) >= 2 and isinstance(a[1], IOError): + err = a[1] + else: + break + filter_error(err) + raise + # XXX should we catch generic exceptions and look for their + # __cause__ or __context__? + finally: + socket.setdefaulttimeout(old_timeout) + + + at contextlib.contextmanager +def captured_output(stream_name): + """Run the 'with' statement body using a StringIO object in place of a + specific attribute on the sys module. + Example use (with 'stream_name=stdout'):: + + with captured_stdout() as s: + print "hello" + assert s.getvalue() == "hello" + """ + import StringIO + orig_stdout = getattr(sys, stream_name) + setattr(sys, stream_name, StringIO.StringIO()) + try: + yield getattr(sys, stream_name) + finally: + setattr(sys, stream_name, orig_stdout) + +def captured_stdout(): + return captured_output("stdout") + +def captured_stdin(): + return captured_output("stdin") + +def gc_collect(): + """Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. + """ + gc.collect() + if is_jython: + time.sleep(0.1) + gc.collect() + gc.collect() + + +#======================================================================= +# Decorator for running a function in a different locale, correctly resetting +# it afterwards. 
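# Illustrative usage sketch, not part of the original file: the decorator
# defined just below takes a locale category name and candidate locales to
# try in order; the category and locale names here are only examples.
#
#     @run_with_locale('LC_NUMERIC', 'de_DE', 'fr_FR', '')
#     def test_float_parsing(self):
#         # float() must keep accepting '.' whatever locale is active
#         self.assertEqual(float('1.5'), 1.5)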
+ +def run_with_locale(catstr, *locales): + def decorator(func): + def inner(*args, **kwds): + try: + import locale + category = getattr(locale, catstr) + orig_locale = locale.setlocale(category) + except AttributeError: + # if the test author gives us an invalid category string + raise + except: + # cannot retrieve original locale, so do nothing + locale = orig_locale = None + else: + for loc in locales: + try: + locale.setlocale(category, loc) + break + except: + pass + + # now run the function, resetting the locale on exceptions + try: + return func(*args, **kwds) + finally: + if locale and orig_locale: + locale.setlocale(category, orig_locale) + inner.func_name = func.func_name + inner.__doc__ = func.__doc__ + return inner + return decorator + +#======================================================================= +# Big-memory-test support. Separate from 'resources' because memory use should be configurable. + +# Some handy shorthands. Note that these are used for byte-limits as well +# as size-limits, in the various bigmem tests +_1M = 1024*1024 +_1G = 1024 * _1M +_2G = 2 * _1G +_4G = 4 * _1G + +MAX_Py_ssize_t = sys.maxsize + +def set_memlimit(limit): + global max_memuse + global real_max_memuse + sizes = { + 'k': 1024, + 'm': _1M, + 'g': _1G, + 't': 1024*_1G, + } + m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit, + re.IGNORECASE | re.VERBOSE) + if m is None: + raise ValueError('Invalid memory limit %r' % (limit,)) + memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()]) + real_max_memuse = memlimit + if memlimit > MAX_Py_ssize_t: + memlimit = MAX_Py_ssize_t + if memlimit < _2G - 1: + raise ValueError('Memory limit %r too low to be useful' % (limit,)) + max_memuse = memlimit + +def bigmemtest(minsize, memuse, overhead=5*_1M): + """Decorator for bigmem tests. + + 'minsize' is the minimum useful size for the test (in arbitrary, + test-interpreted units.) 'memuse' is the number of 'bytes per size' for + the test, or a good estimate of it. 'overhead' specifies fixed overhead, + independent of the testsize, and defaults to 5Mb. + + The decorator tries to guess a good value for 'size' and passes it to + the decorated test function. If minsize * memuse is more than the + allowed memory use (as defined by max_memuse), the test is skipped. + Otherwise, minsize is adjusted upward to use up to max_memuse. + """ + def decorator(f): + def wrapper(self): + if not max_memuse: + # If max_memuse is 0 (the default), + # we still want to run the tests with size set to a few kb, + # to make sure they work. We still want to avoid using + # too much memory, though, but we do that noisily. 
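# Illustrative usage sketch, not part of the original file: a big-memory test
# declares its minimum useful size and bytes-per-size so the runner can scale
# it down or skip it; the numbers here are only examples.  The original
# small-size fallback continues right below.
#
#     @bigmemtest(minsize=_2G, memuse=1)
#     def test_length(self, size):
#         s = '.' * size
#         self.assertEqual(len(s), size)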
+ maxsize = 5147 + self.assertFalse(maxsize * memuse + overhead > 20 * _1M) + else: + maxsize = int((max_memuse - overhead) / memuse) + if maxsize < minsize: + # Really ought to print 'test skipped' or something + if verbose: + sys.stderr.write("Skipping %s because of memory " + "constraint\n" % (f.__name__,)) + return + # Try to keep some breathing room in memory use + maxsize = max(maxsize - 50 * _1M, minsize) + return f(self, maxsize) + wrapper.minsize = minsize + wrapper.memuse = memuse + wrapper.overhead = overhead + return wrapper + return decorator + +def precisionbigmemtest(size, memuse, overhead=5*_1M): + def decorator(f): + def wrapper(self): + if not real_max_memuse: + maxsize = 5147 + else: + maxsize = size + + if real_max_memuse and real_max_memuse < maxsize * memuse: + if verbose: + sys.stderr.write("Skipping %s because of memory " + "constraint\n" % (f.__name__,)) + return + + return f(self, maxsize) + wrapper.size = size + wrapper.memuse = memuse + wrapper.overhead = overhead + return wrapper + return decorator + +def bigaddrspacetest(f): + """Decorator for tests that fill the address space.""" + def wrapper(self): + if max_memuse < MAX_Py_ssize_t: + if verbose: + sys.stderr.write("Skipping %s because of memory " + "constraint\n" % (f.__name__,)) + else: + return f(self) + return wrapper + +#======================================================================= +# unittest integration. + +class BasicTestRunner: + def run(self, test): + result = unittest.TestResult() + test(result) + return result + +def _id(obj): + return obj + +def requires_resource(resource): + if resource_is_enabled(resource): + return _id + else: + return unittest.skip("resource {0!r} is not enabled".format(resource)) + +def cpython_only(test): + """ + Decorator for tests only applicable on CPython. + """ + return impl_detail(cpython=True)(test) + +def impl_detail(msg=None, **guards): + if check_impl_detail(**guards): + return _id + if msg is None: + guardnames, default = _parse_guards(guards) + if default: + msg = "implementation detail not available on {0}" + else: + msg = "implementation detail specific to {0}" + guardnames = sorted(guardnames.keys()) + msg = msg.format(' or '.join(guardnames)) + return unittest.skip(msg) + +def _parse_guards(guards): + # Returns a tuple ({platform_name: run_me}, default_value) + if not guards: + return ({'cpython': True}, False) + is_true = guards.values()[0] + assert guards.values() == [is_true] * len(guards) # all True or all False + return (guards, not is_true) + +# Use the following check to guard CPython's implementation-specific tests -- +# or to run them only on the implementation(s) guarded by the arguments. +def check_impl_detail(**guards): + """This function returns True or False depending on the host platform. 
+ Examples: + if check_impl_detail(): # only on CPython (default) + if check_impl_detail(jython=True): # only on Jython + if check_impl_detail(cpython=False): # everywhere except on CPython + """ + guards, default = _parse_guards(guards) + return guards.get(platform.python_implementation().lower(), default) + + + +def _run_suite(suite): + """Run tests from a unittest.TestSuite-derived class.""" + if verbose: + runner = unittest.TextTestRunner(sys.stdout, verbosity=2) + else: + runner = BasicTestRunner() + + result = runner.run(suite) + if not result.wasSuccessful(): + if len(result.errors) == 1 and not result.failures: + err = result.errors[0][1] + elif len(result.failures) == 1 and not result.errors: + err = result.failures[0][1] + else: + err = "multiple errors occurred" + if not verbose: + err += "; run in verbose mode for details" + raise TestFailed(err) + + +def run_unittest(*classes): + """Run tests from unittest.TestCase-derived classes.""" + valid_types = (unittest.TestSuite, unittest.TestCase) + suite = unittest.TestSuite() + for cls in classes: + if isinstance(cls, str): + if cls in sys.modules: + suite.addTest(unittest.findTestCases(sys.modules[cls])) + else: + raise ValueError("str arguments must be keys in sys.modules") + elif isinstance(cls, valid_types): + suite.addTest(cls) + else: + suite.addTest(unittest.makeSuite(cls)) + _run_suite(suite) + + +#======================================================================= +# doctest driver. + +def run_doctest(module, verbosity=None): + """Run doctest on the given module. Return (#failures, #tests). + + If optional argument verbosity is not specified (or is None), pass + test_support's belief about verbosity on to doctest. Else doctest's + usual behavior is used (it searches sys.argv for -v). + """ + + import doctest + + if verbosity is None: + verbosity = verbose + else: + verbosity = None + + # Direct doctest output (normally just errors) to real stdout; doctest + # output shouldn't be compared by regrtest. + save_stdout = sys.stdout + sys.stdout = get_original_stdout() + try: + f, t = doctest.testmod(module, verbose=verbosity) + if f: + raise TestFailed("%d of %d doctests failed" % (f, t)) + finally: + sys.stdout = save_stdout + if verbose: + print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t) + return f, t + +#======================================================================= +# Threading support to prevent reporting refleaks when running regrtest.py -R + +# NOTE: we use thread._count() rather than threading.enumerate() (or the +# moral equivalent thereof) because a threading.Thread object is still alive +# until its __bootstrap() method has returned, even after it has been +# unregistered from the threading module. +# thread._count(), on the other hand, only gets decremented *after* the +# __bootstrap() method has returned, which gives us reliable reference counts +# at the end of a test run. + +def threading_setup(): + if thread: + return thread._count(), + else: + return 1, + +def threading_cleanup(nb_threads): + if not thread: + return + + _MAX_COUNT = 10 + for count in range(_MAX_COUNT): + n = thread._count() + if n == nb_threads: + break + time.sleep(0.1) + # XXX print a warning in case of failure? + +def reap_threads(func): + """Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + If threading is unavailable this function does nothing. 
+ """ + if not thread: + return func + + @functools.wraps(func) + def decorator(*args): + key = threading_setup() + try: + return func(*args) + finally: + threading_cleanup(*key) + return decorator + +def reap_children(): + """Use this function at the end of test_main() whenever sub-processes + are started. This will help ensure that no extra children (zombies) + stick around to hog resources and create problems when looking + for refleaks. + """ + + # Reap all our dead child processes so we don't leave zombies around. + # These hog resources and might be causing some of the buildbots to die. + if hasattr(os, 'waitpid'): + any_process = -1 + while True: + try: + # This will raise an exception on Windows. That's ok. + pid, status = os.waitpid(any_process, os.WNOHANG) + if pid == 0: + break + except: + break + +def py3k_bytes(b): + """Emulate the py3k bytes() constructor. + + NOTE: This is only a best effort function. + """ + try: + # memoryview? + return b.tobytes() + except AttributeError: + try: + # iterable of ints? + return b"".join(chr(x) for x in b) + except TypeError: + return bytes(b) From afa at codespeak.net Mon Sep 13 01:01:05 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 01:01:05 +0200 (CEST) Subject: [pypy-svn] r77031 - pypy/branch/fast-forward/lib-python/modified-2.7.0/test Message-ID: <20100912230105.B7DF8282B90@codespeak.net> Author: afa Date: Mon Sep 13 01:01:04 2010 New Revision: 77031 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py (contents, props changed) Log: One missing file for the tests to run Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py Mon Sep 13 01:01:04 2010 @@ -0,0 +1,1543 @@ +#! /usr/bin/env python + +""" +Usage: + +python -m test.regrtest [options] [test_name1 [test_name2 ...]] +python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]] + + +If no arguments or options are provided, finds all files matching +the pattern "test_*" in the Lib/test subdirectory and runs +them in alphabetical order (but see -M and -u, below, for exceptions). + +For more rigorous testing, it is useful to use the following +command line: + +python -E -tt -Wd -3 -m test.regrtest [options] [test_name1 ...] + + +Options: + +-h/--help -- print this text and exit + +Verbosity + +-v/--verbose -- run tests in verbose mode with output to stdout +-w/--verbose2 -- re-run failed tests in verbose mode +-W/--verbose3 -- re-run failed tests in verbose mode immediately +-q/--quiet -- no output unless one or more tests fail +-S/--slow -- print the slowest 10 tests + +Selecting tests + +-r/--random -- randomize test execution order (see below) +-f/--fromfile -- read names of tests to run from a file (see below) +-x/--exclude -- arguments are tests to *exclude* +-s/--single -- single step through a set of tests (see below) +-u/--use RES1,RES2,... + -- specify which special resource intensive tests to run +-M/--memlimit LIMIT + -- run very large memory-consuming tests + +Special runs + +-l/--findleaks -- if GC is available detect tests that leak memory +-L/--runleaks -- run the leaks(1) command just before exit +-R/--huntrleaks RUNCOUNTS + -- search for reference leaks (needs debug build, v. 
slow) +-j/--multiprocess PROCESSES + -- run PROCESSES processes at once +-T/--coverage -- turn on code coverage tracing using the trace module +-D/--coverdir DIRECTORY + -- Directory where coverage files are put +-N/--nocoverdir -- Put coverage files alongside modules +-t/--threshold THRESHOLD + -- call gc.set_threshold(THRESHOLD) +-F/--forever -- run the specified tests in a loop, until an error happens + + +Additional Option Details: + +-r randomizes test execution order. You can use --randseed=int to provide a +int seed value for the randomizer; this is useful for reproducing troublesome +test orders. + +-s On the first invocation of regrtest using -s, the first test file found +or the first test file given on the command line is run, and the name of +the next test is recorded in a file named pynexttest. If run from the +Python build directory, pynexttest is located in the 'build' subdirectory, +otherwise it is located in tempfile.gettempdir(). On subsequent runs, +the test in pynexttest is run, and the next test is written to pynexttest. +When the last test has been run, pynexttest is deleted. In this way it +is possible to single step through the test files. This is useful when +doing memory analysis on the Python interpreter, which process tends to +consume too many resources to run the full regression test non-stop. + +-f reads the names of tests from the file given as f's argument, one +or more test names per line. Whitespace is ignored. Blank lines and +lines beginning with '#' are ignored. This is especially useful for +whittling down failures involving interactions among tests. + +-L causes the leaks(1) command to be run just before exit if it exists. +leaks(1) is available on Mac OS X and presumably on some other +FreeBSD-derived systems. + +-R runs each test several times and examines sys.gettotalrefcount() to +see if the test appears to be leaking references. The argument should +be of the form stab:run:fname where 'stab' is the number of times the +test is run to let gettotalrefcount settle down, 'run' is the number +of times further it is run and 'fname' is the name of the file the +reports are written to. These parameters all have defaults (5, 4 and +"reflog.txt" respectively), and the minimal invocation is '-R :'. + +-M runs tests that require an exorbitant amount of memory. These tests +typically try to ascertain containers keep working when containing more than +2 billion objects, which only works on 64-bit systems. There are also some +tests that try to exhaust the address space of the process, which only makes +sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit, +which is a string in the form of '2.5Gb', determines howmuch memory the +tests will limit themselves to (but they may go slightly over.) The number +shouldn't be more memory than the machine has (including swap memory). You +should also keep in mind that swap memory is generally much, much slower +than RAM, and setting memlimit to all available RAM or higher will heavily +tax the machine. On the other hand, it is no use running these tests with a +limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect +to use more than memlimit memory will be skipped. The big-memory tests +generally run very, very long. + +-u is used to specify which special resource intensive tests to run, +such as those requiring large file support or network connectivity. +The argument is a comma-separated list of words indicating the +resources to test. 
Currently only the following are defined: + + all - Enable all special resources. + + audio - Tests that use the audio device. (There are known + cases of broken audio drivers that can crash Python or + even the Linux kernel.) + + curses - Tests that use curses and will modify the terminal's + state and output modes. + + largefile - It is okay to run some test that may create huge + files. These tests can take a long time and may + consume >2GB of disk space temporarily. + + network - It is okay to run tests that use external network + resource, e.g. testing SSL support for sockets. + + bsddb - It is okay to run the bsddb testsuite, which takes + a long time to complete. + + decimal - Test the decimal module against a large suite that + verifies compliance with standards. + + compiler - Test the compiler package by compiling all the source + in the standard library and test suite. This takes + a long time. Enabling this resource also allows + test_tokenize to verify round-trip lexing on every + file in the test library. + + subprocess Run all tests for the subprocess module. + + urlfetch - It is okay to download files required on testing. + + gui - Run tests that require a running GUI. + + xpickle - Test pickle and cPickle against Python 2.4, 2.5 and 2.6 to + test backwards compatibility. These tests take a long time + to run. + +To enable all resources except one, use '-uall,-'. For +example, to run all the tests except for the bsddb tests, give the +option '-uall,-bsddb'. +""" + +import StringIO +import getopt +import json +import os +import random +import re +import sys +import time +import traceback +import warnings +import unittest +import tempfile +import imp +import platform +import sysconfig + + +# Some times __path__ and __file__ are not absolute (e.g. while running from +# Lib/) and, if we change the CWD to run the tests in a temporary dir, some +# imports might fail. This affects only the modules imported before os.chdir(). +# These modules are searched first in sys.path[0] (so '' -- the CWD) and if +# they are found in the CWD their __file__ and __path__ will be relative (this +# happens before the chdir). All the modules imported after the chdir, are +# not found in the CWD, and since the other paths in sys.path[1:] are absolute +# (site.py absolutize them), the __file__ and __path__ will be absolute too. +# Therefore it is necessary to absolutize manually the __file__ and __path__ of +# the packages to prevent later imports to fail when the CWD is different. +for module in sys.modules.itervalues(): + if hasattr(module, '__path__'): + module.__path__ = [os.path.abspath(path) for path in module.__path__] + if hasattr(module, '__file__'): + module.__file__ = os.path.abspath(module.__file__) + + +# MacOSX (a.k.a. Darwin) has a default stack size that is too small +# for deeply recursive regular expressions. We see this as crashes in +# the Python test suite when running test_re.py and test_sre.py. The +# fix is to set the stack limit to 2048. +# This approach may also be useful for other Unixy platforms that +# suffer from small default stack limits. +if sys.platform == 'darwin': + try: + import resource + except ImportError: + pass + else: + soft, hard = resource.getrlimit(resource.RLIMIT_STACK) + newsoft = min(hard, max(soft, 1024*2048)) + resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard)) + +# Test result constants. 
+PASSED = 1 +FAILED = 0 +ENV_CHANGED = -1 +SKIPPED = -2 +RESOURCE_DENIED = -3 +INTERRUPTED = -4 + +from test import test_support + +RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb', + 'decimal', 'compiler', 'subprocess', 'urlfetch', 'gui', + 'xpickle') + +TEMPDIR = os.path.abspath(tempfile.gettempdir()) + + +def usage(code, msg=''): + print __doc__ + if msg: print msg + sys.exit(code) + + +def main(tests=None, testdir=None, verbose=0, quiet=False, + exclude=False, single=False, randomize=False, fromfile=None, + findleaks=False, use_resources=None, trace=False, coverdir='coverage', + runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, + random_seed=None, use_mp=None, verbose3=False, forever=False): + """Execute a test suite. + + This also parses command-line options and modifies its behavior + accordingly. + + tests -- a list of strings containing test names (optional) + testdir -- the directory in which to look for tests (optional) + + Users other than the Python test suite will certainly want to + specify testdir; if it's omitted, the directory containing the + Python test suite is searched for. + + If the tests argument is omitted, the tests listed on the + command-line will be used. If that's empty, too, then all *.py + files beginning with test_ will be used. + + The other default arguments (verbose, quiet, exclude, + single, randomize, findleaks, use_resources, trace, coverdir, + print_slow, and random_seed) allow programmers calling main() + directly to set the values that would normally be set by flags + on the command line. + """ + + test_support.record_original_stdout(sys.stdout) + try: + opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:', + ['help', 'verbose', 'verbose2', 'verbose3', 'quiet', + 'exclude', 'single', 'slow', 'random', 'fromfile', 'findleaks', + 'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir', + 'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=', + 'multiprocess=', 'slaveargs=', 'forever']) + except getopt.error, msg: + usage(2, msg) + + # Defaults + if random_seed is None: + random_seed = random.randrange(10000000) + if use_resources is None: + use_resources = [] + for o, a in opts: + if o in ('-h', '--help'): + usage(0) + elif o in ('-v', '--verbose'): + verbose += 1 + elif o in ('-w', '--verbose2'): + verbose2 = True + elif o in ('-W', '--verbose3'): + verbose3 = True + elif o in ('-q', '--quiet'): + quiet = True; + verbose = 0 + elif o in ('-x', '--exclude'): + exclude = True + elif o in ('-s', '--single'): + single = True + elif o in ('-S', '--slow'): + print_slow = True + elif o in ('-r', '--randomize'): + randomize = True + elif o == '--randseed': + random_seed = int(a) + elif o in ('-f', '--fromfile'): + fromfile = a + elif o in ('-l', '--findleaks'): + findleaks = True + elif o in ('-L', '--runleaks'): + runleaks = True + elif o in ('-t', '--threshold'): + import gc + gc.set_threshold(int(a)) + elif o in ('-T', '--coverage'): + trace = True + elif o in ('-D', '--coverdir'): + coverdir = os.path.join(os.getcwd(), a) + elif o in ('-N', '--nocoverdir'): + coverdir = None + elif o in ('-R', '--huntrleaks'): + huntrleaks = a.split(':') + if len(huntrleaks) not in (2, 3): + print a, huntrleaks + usage(2, '-R takes 2 or 3 colon-separated arguments') + if not huntrleaks[0]: + huntrleaks[0] = 5 + else: + huntrleaks[0] = int(huntrleaks[0]) + if not huntrleaks[1]: + huntrleaks[1] = 4 + else: + huntrleaks[1] = int(huntrleaks[1]) + if len(huntrleaks) == 2 or not huntrleaks[2]: + huntrleaks[2:] = 
["reflog.txt"] + elif o in ('-M', '--memlimit'): + test_support.set_memlimit(a) + elif o in ('-u', '--use'): + u = [x.lower() for x in a.split(',')] + for r in u: + if r == 'all': + use_resources[:] = RESOURCE_NAMES + continue + remove = False + if r[0] == '-': + remove = True + r = r[1:] + if r not in RESOURCE_NAMES: + usage(1, 'Invalid -u/--use option: ' + a) + if remove: + if r in use_resources: + use_resources.remove(r) + elif r not in use_resources: + use_resources.append(r) + elif o in ('-F', '--forever'): + forever = True + elif o in ('-j', '--multiprocess'): + use_mp = int(a) + elif o == '--slaveargs': + args, kwargs = json.loads(a) + try: + result = runtest(*args, **kwargs) + except BaseException, e: + result = INTERRUPTED, e.__class__.__name__ + print # Force a newline (just in case) + print json.dumps(result) + sys.exit(0) + else: + print >>sys.stderr, ("No handler for option {}. Please " + "report this as a bug at http://bugs.python.org.").format(o) + sys.exit(1) + if single and fromfile: + usage(2, "-s and -f don't go together!") + if use_mp and trace: + usage(2, "-T and -j don't go together!") + if use_mp and findleaks: + usage(2, "-l and -j don't go together!") + if use_mp and max(sys.flags): + # TODO: inherit the environment and the flags + print "Warning: flags and environment variables are ignored with -j option" + + good = [] + bad = [] + skipped = [] + resource_denieds = [] + environment_changed = [] + interrupted = False + + if findleaks: + try: + import gc + except ImportError: + print 'No GC available, disabling findleaks.' + findleaks = False + else: + # Uncomment the line below to report garbage that is not + # freeable by reference counting alone. By default only + # garbage that is not collectable by the GC is reported. + #gc.set_debug(gc.DEBUG_SAVEALL) + found_garbage = [] + + if single: + filename = os.path.join(TEMPDIR, 'pynexttest') + try: + fp = open(filename, 'r') + next_test = fp.read().strip() + tests = [next_test] + fp.close() + except IOError: + pass + + if fromfile: + tests = [] + fp = open(os.path.join(test_support.SAVEDCWD, fromfile)) + for line in fp: + guts = line.split() # assuming no test has whitespace in its name + if guts and not guts[0].startswith('#'): + tests.extend(guts) + fp.close() + + # Strip .py extensions. + removepy(args) + removepy(tests) + + stdtests = STDTESTS[:] + nottests = NOTTESTS.copy() + if exclude: + for arg in args: + if arg in stdtests: + stdtests.remove(arg) + nottests.add(arg) + args = [] + + # For a partial run, we do not need to clutter the output. 
+ if verbose or not (quiet or single or tests or args): + # Print basic platform information + print "==", platform.python_implementation(), \ + " ".join(sys.version.split()) + print "== ", platform.platform(aliased=True), \ + "%s-endian" % sys.byteorder + print "== ", os.getcwd() + + alltests = findtests(testdir, stdtests, nottests) + selected = tests or args or alltests + if single: + selected = selected[:1] + try: + next_single_test = alltests[alltests.index(selected[0])+1] + except IndexError: + next_single_test = None + if randomize: + random.seed(random_seed) + print "Using random seed", random_seed + random.shuffle(selected) + if trace: + import trace + tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], + trace=False, count=True) + + test_times = [] + test_support.use_resources = use_resources + save_modules = sys.modules.keys() + + def accumulate_result(test, result): + ok, test_time = result + test_times.append((test_time, test)) + if ok == PASSED: + good.append(test) + elif ok == FAILED: + bad.append(test) + elif ok == ENV_CHANGED: + bad.append(test) + environment_changed.append(test) + elif ok == SKIPPED: + skipped.append(test) + elif ok == RESOURCE_DENIED: + skipped.append(test) + resource_denieds.append(test) + + if forever: + def test_forever(tests=list(selected)): + while True: + for test in tests: + yield test + if bad: + return + tests = test_forever() + else: + tests = iter(selected) + + if use_mp: + try: + from threading import Thread + except ImportError: + print "Multiprocess option requires thread support" + sys.exit(2) + from Queue import Queue + from subprocess import Popen, PIPE + debug_output_pat = re.compile(r"\[\d+ refs\]$") + output = Queue() + def tests_and_args(): + for test in tests: + args_tuple = ( + (test, verbose, quiet, testdir), + dict(huntrleaks=huntrleaks, use_resources=use_resources) + ) + yield (test, args_tuple) + pending = tests_and_args() + def work(): + # A worker thread. + try: + while True: + try: + test, args_tuple = next(pending) + except StopIteration: + output.put((None, None, None, None)) + return + # -E is needed by some tests, e.g. test_import + popen = Popen([sys.executable, '-E', '-m', 'test.regrtest', + '--slaveargs', json.dumps(args_tuple)], + stdout=PIPE, stderr=PIPE, + universal_newlines=True, + close_fds=(os.name != 'nt')) + stdout, stderr = popen.communicate() + # Strip last refcount output line if it exists, since it + # comes from the shutdown of the interpreter in the subcommand. + stderr = debug_output_pat.sub("", stderr) + stdout, _, result = stdout.strip().rpartition("\n") + if not result: + output.put((None, None, None, None)) + return + result = json.loads(result) + if not quiet: + stdout = test+'\n'+stdout + output.put((test, stdout.rstrip(), stderr.rstrip(), result)) + except BaseException: + output.put((None, None, None, None)) + raise + workers = [Thread(target=work) for i in range(use_mp)] + for worker in workers: + worker.start() + finished = 0 + try: + while finished < use_mp: + test, stdout, stderr, result = output.get() + if test is None: + finished += 1 + continue + if stdout: + print stdout + if stderr: + print >>sys.stderr, stderr + if result[0] == INTERRUPTED: + assert result[1] == 'KeyboardInterrupt' + raise KeyboardInterrupt # What else? 
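# (Editor's aside, not from the commit: the --slaveargs protocol used by the
#  worker threads above can be exercised by hand, assuming the 'test' package
#  is importable and using 'test_grammar' as an arbitrary test name:
#
#      import json, subprocess, sys
#      args_tuple = (('test_grammar', 0, False, None),   # test, verbose, quiet, testdir
#                    dict(huntrleaks=False, use_resources=[]))
#      popen = subprocess.Popen([sys.executable, '-E', '-m', 'test.regrtest',
#                                '--slaveargs', json.dumps(args_tuple)],
#                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#      stdout, stderr = popen.communicate()
#      # the last line of stdout is the json-encoded result pair
#      result = json.loads(stdout.strip().rpartition("\n")[2])
#  )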
+ accumulate_result(test, result) + except KeyboardInterrupt: + interrupted = True + pending.close() + for worker in workers: + worker.join() + else: + for test in tests: + if not quiet: + print test + sys.stdout.flush() + if trace: + # If we're tracing code coverage, then we don't exit with status + # if on a false return value from main. + tracer.runctx('runtest(test, verbose, quiet, testdir)', + globals=globals(), locals=vars()) + else: + try: + result = runtest(test, verbose, quiet, + testdir, huntrleaks) + accumulate_result(test, result) + if verbose3 and result[0] == FAILED: + print "Re-running test %r in verbose mode" % test + runtest(test, True, quiet, testdir, huntrleaks) + except KeyboardInterrupt: + interrupted = True + break + except: + raise + if findleaks: + gc.collect() + if gc.garbage: + print "Warning: test created", len(gc.garbage), + print "uncollectable object(s)." + # move the uncollectable objects somewhere so we don't see + # them again + found_garbage.extend(gc.garbage) + del gc.garbage[:] + # Unload the newly imported modules (best effort finalization) + for module in sys.modules.keys(): + if module not in save_modules and module.startswith("test."): + test_support.unload(module) + + if interrupted: + # print a newline after ^C + print + print "Test suite interrupted by signal SIGINT." + omitted = set(selected) - set(good) - set(bad) - set(skipped) + print count(len(omitted), "test"), "omitted:" + printlist(omitted) + if good and not quiet: + if not bad and not skipped and not interrupted and len(good) > 1: + print "All", + print count(len(good), "test"), "OK." + if print_slow: + test_times.sort(reverse=True) + print "10 slowest tests:" + for time, test in test_times[:10]: + print "%s: %.1fs" % (test, time) + if bad: + bad = set(bad) - set(environment_changed) + if bad: + print count(len(bad), "test"), "failed:" + printlist(bad) + if environment_changed: + print "{} altered the execution environment:".format( + count(len(environment_changed), "test")) + printlist(environment_changed) + if skipped and not quiet: + print count(len(skipped), "test"), "skipped:" + printlist(skipped) + + e = _ExpectedSkips() + plat = sys.platform + if e.isvalid(): + surprise = set(skipped) - e.getexpected() - set(resource_denieds) + if surprise: + print count(len(surprise), "skip"), \ + "unexpected on", plat + ":" + printlist(surprise) + else: + print "Those skips are all expected on", plat + "." + else: + print "Ask someone to teach regrtest.py about which tests are" + print "expected to get skipped on", plat + "." 
+ + if verbose2 and bad: + print "Re-running failed tests in verbose mode" + for test in bad: + print "Re-running test %r in verbose mode" % test + sys.stdout.flush() + try: + test_support.verbose = True + ok = runtest(test, True, quiet, testdir, + huntrleaks) + except KeyboardInterrupt: + # print a newline separate from the ^C + print + break + except: + raise + + if single: + if next_single_test: + with open(filename, 'w') as fp: + fp.write(next_single_test + '\n') + else: + os.unlink(filename) + + if trace: + r = tracer.results() + r.write_results(show_missing=True, summary=True, coverdir=coverdir) + + if runleaks: + os.system("leaks %d" % os.getpid()) + + sys.exit(len(bad) > 0 or interrupted) + + +STDTESTS = [ + 'test_grammar', + 'test_opcodes', + 'test_dict', + 'test_builtin', + 'test_exceptions', + 'test_types', + 'test_unittest', + 'test_doctest', + 'test_doctest2', +] + +NOTTESTS = { + 'test_support', + 'test_future1', + 'test_future2', +} + +def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS): + """Return a list of all applicable test modules.""" + if testdir: + testdirs = [testdir] + else: + testdirs = findtestdirs() + names = {} + for testdir in testdirs: + names.update(dict.fromkeys(os.listdir(testdir))) + tests = [] + others = set(stdtests) | nottests + for name in names: + modname, ext = os.path.splitext(name) + if modname[:5] == "test_" and ext == ".py" and modname not in others: + tests.append(modname) + return stdtests + sorted(tests) + +def runtest(test, verbose, quiet, + testdir=None, huntrleaks=False, use_resources=None): + """Run a single test. + + test -- the name of the test + verbose -- if true, print more messages + quiet -- if true, don't print 'skipped' messages (probably redundant) + test_times -- a list of (time, test_name) pairs + testdir -- test directory + huntrleaks -- run multiple times to test for leaks; requires a debug + build; a triple corresponding to -R's three arguments + Returns one of the test result constants: + INTERRUPTED KeyboardInterrupt when run under -j + RESOURCE_DENIED test skipped because resource denied + SKIPPED test skipped for some other reason + ENV_CHANGED test failed because it changed the execution environment + FAILED test failed + PASSED test passed + """ + + test_support.verbose = verbose # Tell tests to be moderately quiet + if use_resources is not None: + test_support.use_resources = use_resources + try: + return runtest_inner(test, verbose, quiet, + testdir, huntrleaks) + finally: + cleanup_test_droppings(test, verbose) + + +# Unit tests are supposed to leave the execution environment unchanged +# once they complete. But sometimes tests have bugs, especially when +# tests fail, and the changes to environment go on to mess up other +# tests. This can cause issues with buildbot stability, since tests +# are run in random order and so problems may appear to come and go. +# There are a few things we can save and restore to mitigate this, and +# the following context manager handles this task. + +class saved_test_environment: + """Save bits of the test environment and restore them at block exit. + + with saved_test_environment(testname, verbose, quiet): + #stuff + + Unless quiet is True, a warning is printed to stderr if any of + the saved items was changed by the test. The attribute 'changed' + is initially False, but is set to True if a change is detected. + + If verbose is more than 1, the before and after state of changed + items is also printed. 
+ """ + + changed = False + + def __init__(self, testname, verbose=0, quiet=False): + self.testname = testname + self.verbose = verbose + self.quiet = quiet + + # To add things to save and restore, add a name XXX to the resources list + # and add corresponding get_XXX/restore_XXX functions. get_XXX should + # return the value to be saved and compared against a second call to the + # get function when test execution completes. restore_XXX should accept + # the saved value and restore the resource using it. It will be called if + # and only if a change in the value is detected. + # + # Note: XXX will have any '.' replaced with '_' characters when determining + # the corresponding method names. + + resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr', + 'os.environ', 'sys.path', 'asyncore.socket_map') + + def get_sys_argv(self): + return id(sys.argv), sys.argv, sys.argv[:] + def restore_sys_argv(self, saved_argv): + sys.argv = saved_argv[1] + sys.argv[:] = saved_argv[2] + + def get_cwd(self): + return os.getcwd() + def restore_cwd(self, saved_cwd): + os.chdir(saved_cwd) + + def get_sys_stdout(self): + return sys.stdout + def restore_sys_stdout(self, saved_stdout): + sys.stdout = saved_stdout + + def get_sys_stderr(self): + return sys.stderr + def restore_sys_stderr(self, saved_stderr): + sys.stderr = saved_stderr + + def get_sys_stdin(self): + return sys.stdin + def restore_sys_stdin(self, saved_stdin): + sys.stdin = saved_stdin + + def get_os_environ(self): + return id(os.environ), os.environ, dict(os.environ) + def restore_os_environ(self, saved_environ): + os.environ = saved_environ[1] + os.environ.clear() + os.environ.update(saved_environ[2]) + + def get_sys_path(self): + return id(sys.path), sys.path, sys.path[:] + def restore_sys_path(self, saved_path): + sys.path = saved_path[1] + sys.path[:] = saved_path[2] + + def get_asyncore_socket_map(self): + asyncore = sys.modules.get('asyncore') + return asyncore and asyncore.socket_map or {} + def restore_asyncore_socket_map(self, saved_map): + asyncore = sys.modules.get('asyncore') + if asyncore is not None: + asyncore.socket_map.clear() + asyncore.socket_map.update(saved_map) + + def resource_info(self): + for name in self.resources: + method_suffix = name.replace('.', '_') + get_name = 'get_' + method_suffix + restore_name = 'restore_' + method_suffix + yield name, getattr(self, get_name), getattr(self, restore_name) + + def __enter__(self): + self.saved_values = dict((name, get()) for name, get, restore + in self.resource_info()) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for name, get, restore in self.resource_info(): + current = get() + original = self.saved_values[name] + # Check for changes to the resource's value + if current != original: + self.changed = True + restore(original) + if not self.quiet: + print >>sys.stderr, ( + "Warning -- {} was modified by {}".format( + name, self.testname)) + if self.verbose > 1: + print >>sys.stderr, ( + " Before: {}\n After: {} ".format( + original, current)) + # XXX (ncoghlan): for most resources (e.g. sys.path) identity + # matters at least as much as value. For others (e.g. cwd), + # identity is irrelevant. Should we add a mechanism to check + # for substitution in the cases where it matters? 
+ return False + + +def runtest_inner(test, verbose, quiet, + testdir=None, huntrleaks=False): + test_support.unload(test) + if verbose: + capture_stdout = None + else: + capture_stdout = StringIO.StringIO() + + test_time = 0.0 + refleak = False # True if the test leaked references. + try: + save_stdout = sys.stdout + try: + if capture_stdout: + sys.stdout = capture_stdout + if test.startswith('test.'): + abstest = test + else: + # Always import it from the test package + abstest = 'test.' + test + with saved_test_environment(test, verbose, quiet) as environment: + start_time = time.time() + the_package = __import__(abstest, globals(), locals(), []) + the_module = getattr(the_package, test) + # Old tests run to completion simply as a side-effect of + # being imported. For tests based on unittest or doctest, + # explicitly invoke their test_main() function (if it exists). + indirect_test = getattr(the_module, "test_main", None) + if indirect_test is not None: + indirect_test() + if huntrleaks: + refleak = dash_R(the_module, test, indirect_test, + huntrleaks) + test_time = time.time() - start_time + finally: + sys.stdout = save_stdout + except test_support.ResourceDenied, msg: + if not quiet: + print test, "skipped --", msg + sys.stdout.flush() + return RESOURCE_DENIED, test_time + except unittest.SkipTest, msg: + if not quiet: + print test, "skipped --", msg + sys.stdout.flush() + return SKIPPED, test_time + except KeyboardInterrupt: + raise + except test_support.TestFailed, msg: + print "test", test, "failed --", msg + sys.stdout.flush() + return FAILED, test_time + except: + type, value = sys.exc_info()[:2] + print "test", test, "crashed --", str(type) + ":", value + sys.stdout.flush() + if verbose: + traceback.print_exc(file=sys.stdout) + sys.stdout.flush() + return FAILED, test_time + else: + if refleak: + return FAILED, test_time + if environment.changed: + return ENV_CHANGED, test_time + # Except in verbose mode, tests should not print anything + if verbose or huntrleaks: + return PASSED, test_time + output = capture_stdout.getvalue() + if not output: + return PASSED, test_time + print "test", test, "produced unexpected output:" + print "*" * 70 + print output + print "*" * 70 + sys.stdout.flush() + return FAILED, test_time + +def cleanup_test_droppings(testname, verbose): + import shutil + import stat + + # Try to clean up junk commonly left behind. While tests shouldn't leave + # any files or directories behind, when a test fails that can be tedious + # for it to arrange. The consequences can be especially nasty on Windows, + # since if a test leaves a file open, it cannot be deleted by name (while + # there's nothing we can do about that here either, we can display the + # name of the offending test, which is a real help). 
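# (Editor's aside on the contract runtest_inner() above relies on: a test
#  module either does all its work at import time, or exposes a test_main()
#  entry point that is invoked explicitly. A minimal, hypothetical
#  Lib/test/test_example.py would look like:
#
#      from test import test_support
#      import unittest
#
#      class ExampleTest(unittest.TestCase):
#          def test_truth(self):
#              self.assertTrue(True)
#
#      def test_main():
#          test_support.run_unittest(ExampleTest)
#
#      if __name__ == '__main__':
#          test_main()
#  )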
+ for name in (test_support.TESTFN, + "db_home", + ): + if not os.path.exists(name): + continue + + if os.path.isdir(name): + kind, nuker = "directory", shutil.rmtree + elif os.path.isfile(name): + kind, nuker = "file", os.unlink + else: + raise SystemError("os.path says %r exists but is neither " + "directory nor file" % name) + + if verbose: + print "%r left behind %s %r" % (testname, kind, name) + try: + # if we have chmod, fix possible permissions problems + # that might prevent cleanup + if (hasattr(os, 'chmod')): + os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + nuker(name) + except Exception, msg: + print >> sys.stderr, ("%r left behind %s %r and it couldn't be " + "removed: %s" % (testname, kind, name, msg)) + +def dash_R(the_module, test, indirect_test, huntrleaks): + """Run a test multiple times, looking for reference leaks. + + Returns: + False if the test didn't leak references; True if we detected refleaks. + """ + # This code is hackish and inelegant, but it seems to do the job. + import copy_reg, _abcoll, _pyio + + if not hasattr(sys, 'gettotalrefcount'): + raise Exception("Tracking reference leaks requires a debug build " + "of Python") + + # Save current values for dash_R_cleanup() to restore. + fs = warnings.filters[:] + ps = copy_reg.dispatch_table.copy() + pic = sys.path_importer_cache.copy() + try: + import zipimport + except ImportError: + zdc = None # Run unmodified on platforms without zipimport support + else: + zdc = zipimport._zip_directory_cache.copy() + abcs = {} + modules = _abcoll, _pyio + for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]: + # XXX isinstance(abc, ABCMeta) leads to infinite recursion + if not hasattr(abc, '_abc_registry'): + continue + for obj in abc.__subclasses__() + [abc]: + abcs[obj] = obj._abc_registry.copy() + + if indirect_test: + def run_the_test(): + indirect_test() + else: + def run_the_test(): + imp.reload(the_module) + + deltas = [] + nwarmup, ntracked, fname = huntrleaks + fname = os.path.join(test_support.SAVEDCWD, fname) + repcount = nwarmup + ntracked + print >> sys.stderr, "beginning", repcount, "repetitions" + print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount] + dash_R_cleanup(fs, ps, pic, zdc, abcs) + for i in range(repcount): + rc_before = sys.gettotalrefcount() + run_the_test() + sys.stderr.write('.') + dash_R_cleanup(fs, ps, pic, zdc, abcs) + rc_after = sys.gettotalrefcount() + if i >= nwarmup: + deltas.append(rc_after - rc_before) + print >> sys.stderr + if any(deltas): + msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas)) + print >> sys.stderr, msg + with open(fname, "a") as refrep: + print >> refrep, msg + refrep.flush() + return True + return False + +def dash_R_cleanup(fs, ps, pic, zdc, abcs): + import gc, copy_reg + import _strptime, linecache + dircache = test_support.import_module('dircache', deprecated=True) + import urlparse, urllib, urllib2, mimetypes, doctest + import struct, filecmp + from distutils.dir_util import _path_created + + # Clear the warnings registry, so they can be displayed again + for mod in sys.modules.values(): + if hasattr(mod, '__warningregistry__'): + del mod.__warningregistry__ + + # Restore some original values. 
+ warnings.filters[:] = fs + copy_reg.dispatch_table.clear() + copy_reg.dispatch_table.update(ps) + sys.path_importer_cache.clear() + sys.path_importer_cache.update(pic) + try: + import zipimport + except ImportError: + pass # Run unmodified on platforms without zipimport support + else: + zipimport._zip_directory_cache.clear() + zipimport._zip_directory_cache.update(zdc) + + # clear type cache + sys._clear_type_cache() + + # Clear ABC registries, restoring previously saved ABC registries. + for abc, registry in abcs.items(): + abc._abc_registry = registry.copy() + abc._abc_cache.clear() + abc._abc_negative_cache.clear() + + # Clear assorted module caches. + _path_created.clear() + re.purge() + _strptime._regex_cache.clear() + urlparse.clear_cache() + urllib.urlcleanup() + urllib2.install_opener(None) + dircache.reset() + linecache.clearcache() + mimetypes._default_mime_types() + filecmp._cache.clear() + struct._clearcache() + doctest.master = None + + # Collect cyclic trash. + gc.collect() + +def findtestdirs(): + # XXX hacking: returns a list of both the '2.7.0/test' and the + # 'modified-2.7.0/test' directories, as full paths. + testdir = os.path.abspath(os.path.dirname(__file__) or os.curdir) + assert os.path.basename(testdir).lower() == 'test' + maindir = os.path.dirname(testdir) + libpythondir = os.path.dirname(maindir) + maindirname = os.path.basename(maindir).lower() + if maindirname.startswith('modified-'): + maindirname = maindirname[len('modified-'):] + testdir1 = os.path.join(libpythondir, maindirname, 'test') + testdir2 = os.path.join(libpythondir, 'modified-'+maindirname, 'test') + return [testdir1, testdir2] + +def removepy(names): + if not names: + return + for idx, name in enumerate(names): + basename, ext = os.path.splitext(name) + if ext == '.py': + names[idx] = basename + +def count(n, word): + if n == 1: + return "%d %s" % (n, word) + else: + return "%d %ss" % (n, word) + +def printlist(x, width=70, indent=4): + """Print the elements of iterable x to stdout. + + Optional arg width (default 70) is the maximum line length. + Optional arg indent (default 4) is the number of blanks with which to + begin each line. + """ + + from textwrap import fill + blanks = ' ' * indent + # Print the sorted list: 'x' may be a '--random' list or a set() + print fill(' '.join(str(elt) for elt in sorted(x)), width, + initial_indent=blanks, subsequent_indent=blanks) + +# Map sys.platform to a string containing the basenames of tests +# expected to be skipped on that platform. +# +# Special cases: +# test_pep277 +# The _ExpectedSkips constructor adds this to the set of expected +# skips if not os.path.supports_unicode_filenames. +# test_timeout +# Controlled by test_timeout.skip_expected. Requires the network +# resource and a socket module. +# +# Tests that are expected to be skipped everywhere except on one platform +# are also handled separately. 
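# (Editor's note: the _ExpectedSkips class further down consumes these
#  strings roughly as follows:
#
#      expected = set(_expectations[sys.platform].split())
#      expected.add('test_linuxaudiodev')   # expected to be skipped everywhere
#      if not os.path.supports_unicode_filenames:
#          expected.add('test_pep277')
#
#  plus the other platform-conditional additions shown in its __init__.)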
+ +_expectations = { + 'win32': + """ + test__locale + test_bsddb185 + test_bsddb3 + test_commands + test_crypt + test_curses + test_dbm + test_dl + test_fcntl + test_fork1 + test_epoll + test_gdbm + test_grp + test_ioctl + test_largefile + test_kqueue + test_mhlib + test_openpty + test_ossaudiodev + test_pipes + test_poll + test_posix + test_pty + test_pwd + test_resource + test_signal + test_threadsignals + test_timing + test_wait3 + test_wait4 + """, + 'linux2': + """ + test_bsddb185 + test_curses + test_dl + test_largefile + test_kqueue + test_ossaudiodev + """, + 'unixware7': + """ + test_bsddb + test_bsddb185 + test_dl + test_epoll + test_largefile + test_kqueue + test_minidom + test_openpty + test_pyexpat + test_sax + test_sundry + """, + 'openunix8': + """ + test_bsddb + test_bsddb185 + test_dl + test_epoll + test_largefile + test_kqueue + test_minidom + test_openpty + test_pyexpat + test_sax + test_sundry + """, + 'sco_sv3': + """ + test_asynchat + test_bsddb + test_bsddb185 + test_dl + test_fork1 + test_epoll + test_gettext + test_largefile + test_locale + test_kqueue + test_minidom + test_openpty + test_pyexpat + test_queue + test_sax + test_sundry + test_thread + test_threaded_import + test_threadedtempfile + test_threading + """, + 'riscos': + """ + test_asynchat + test_atexit + test_bsddb + test_bsddb185 + test_bsddb3 + test_commands + test_crypt + test_dbm + test_dl + test_fcntl + test_fork1 + test_epoll + test_gdbm + test_grp + test_largefile + test_locale + test_kqueue + test_mmap + test_openpty + test_poll + test_popen2 + test_pty + test_pwd + test_strop + test_sundry + test_thread + test_threaded_import + test_threadedtempfile + test_threading + test_timing + """, + 'darwin': + """ + test__locale + test_bsddb + test_bsddb3 + test_curses + test_epoll + test_gdbm + test_largefile + test_locale + test_kqueue + test_minidom + test_ossaudiodev + test_poll + """, + 'sunos5': + """ + test_bsddb + test_bsddb185 + test_curses + test_dbm + test_epoll + test_kqueue + test_gdbm + test_gzip + test_openpty + test_zipfile + test_zlib + """, + 'hp-ux11': + """ + test_bsddb + test_bsddb185 + test_curses + test_dl + test_epoll + test_gdbm + test_gzip + test_largefile + test_locale + test_kqueue + test_minidom + test_openpty + test_pyexpat + test_sax + test_zipfile + test_zlib + """, + 'atheos': + """ + test_bsddb185 + test_curses + test_dl + test_gdbm + test_epoll + test_largefile + test_locale + test_kqueue + test_mhlib + test_mmap + test_poll + test_popen2 + test_resource + """, + 'cygwin': + """ + test_bsddb185 + test_bsddb3 + test_curses + test_dbm + test_epoll + test_ioctl + test_kqueue + test_largefile + test_locale + test_ossaudiodev + test_socketserver + """, + 'os2emx': + """ + test_audioop + test_bsddb185 + test_bsddb3 + test_commands + test_curses + test_dl + test_epoll + test_kqueue + test_largefile + test_mhlib + test_mmap + test_openpty + test_ossaudiodev + test_pty + test_resource + test_signal + """, + 'freebsd4': + """ + test_bsddb + test_bsddb3 + test_epoll + test_gdbm + test_locale + test_ossaudiodev + test_pep277 + test_pty + test_socketserver + test_tcl + test_tk + test_ttk_guionly + test_ttk_textonly + test_timeout + test_urllibnet + test_multiprocessing + """, + 'aix5': + """ + test_bsddb + test_bsddb185 + test_bsddb3 + test_bz2 + test_dl + test_epoll + test_gdbm + test_gzip + test_kqueue + test_ossaudiodev + test_tcl + test_tk + test_ttk_guionly + test_ttk_textonly + test_zipimport + test_zlib + """, + 'openbsd3': + """ + test_ascii_formatd + test_bsddb + 
test_bsddb3 + test_ctypes + test_dl + test_epoll + test_gdbm + test_locale + test_normalization + test_ossaudiodev + test_pep277 + test_tcl + test_tk + test_ttk_guionly + test_ttk_textonly + test_multiprocessing + """, + 'netbsd3': + """ + test_ascii_formatd + test_bsddb + test_bsddb185 + test_bsddb3 + test_ctypes + test_curses + test_dl + test_epoll + test_gdbm + test_locale + test_ossaudiodev + test_pep277 + test_tcl + test_tk + test_ttk_guionly + test_ttk_textonly + test_multiprocessing + """, +} +_expectations['freebsd5'] = _expectations['freebsd4'] +_expectations['freebsd6'] = _expectations['freebsd4'] +_expectations['freebsd7'] = _expectations['freebsd4'] +_expectations['freebsd8'] = _expectations['freebsd4'] + +class _ExpectedSkips: + def __init__(self): + import os.path + from test import test_timeout + + self.valid = False + if sys.platform in _expectations: + s = _expectations[sys.platform] + self.expected = set(s.split()) + + # expected to be skipped on every platform, even Linux + self.expected.add('test_linuxaudiodev') + + if not os.path.supports_unicode_filenames: + self.expected.add('test_pep277') + + if test_timeout.skip_expected: + self.expected.add('test_timeout') + + if sys.maxint == 9223372036854775807L: + self.expected.add('test_imageop') + + if sys.platform != "darwin": + MAC_ONLY = ["test_macos", "test_macostools", "test_aepack", + "test_plistlib", "test_scriptpackages", + "test_applesingle"] + for skip in MAC_ONLY: + self.expected.add(skip) + elif len(u'\0'.encode('unicode-internal')) == 4: + self.expected.add("test_macostools") + + + if sys.platform != "win32": + # test_sqlite is only reliable on Windows where the library + # is distributed with Python + WIN_ONLY = ["test_unicode_file", "test_winreg", + "test_winsound", "test_startfile", + "test_sqlite"] + for skip in WIN_ONLY: + self.expected.add(skip) + + if sys.platform != 'irix': + IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl", + "test_gl", "test_imgfile"] + for skip in IRIX_ONLY: + self.expected.add(skip) + + if sys.platform != 'sunos5': + self.expected.add('test_sunaudiodev') + self.expected.add('test_nis') + + if not sys.py3kwarning: + self.expected.add('test_py3kwarn') + + self.valid = True + + def isvalid(self): + "Return true iff _ExpectedSkips knows about the current platform." + return self.valid + + def getexpected(self): + """Return set of test names we expect to skip on current platform. + + self.isvalid() must be true. + """ + + assert self.isvalid() + return self.expected + +if __name__ == '__main__': + # Simplification for findtestdir(). + assert __file__ == os.path.abspath(sys.argv[0]) + + # When tests are run from the Python build directory, it is best practice + # to keep the test files in a subfolder. It eases the cleanup of leftover + # files using command "make distclean". + if sysconfig.is_python_build(): + TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build') + TEMPDIR = os.path.abspath(TEMPDIR) + if not os.path.exists(TEMPDIR): + os.mkdir(TEMPDIR) + + # Define a writable temp dir that will be used as cwd while running + # the tests. The name of the dir includes the pid to allow parallel + # testing (see the -j option). + TESTCWD = 'test_python_{}'.format(os.getpid()) + + TESTCWD = os.path.join(TEMPDIR, TESTCWD) + + # Run the tests in a context manager that temporary changes the CWD to a + # temporary and writable directory. If it's not possible to create or + # change the CWD, the original CWD will be used. 
The original CWD is + # available from test_support.SAVEDCWD. + with test_support.temp_cwd(TESTCWD, quiet=True): + main() From arigo at codespeak.net Mon Sep 13 09:36:05 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 09:36:05 +0200 (CEST) Subject: [pypy-svn] r77032 - pypy/extradoc/planning Message-ID: <20100913073605.A0162282C01@codespeak.net> Author: arigo Date: Mon Sep 13 09:36:03 2010 New Revision: 77032 Modified: pypy/extradoc/planning/jit.txt Log: Mark some tasks as done. Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Mon Sep 13 09:36:03 2010 @@ -2,25 +2,12 @@ --------- - look at assembler-assembler calls again: if the inner function is - traced after the outer one, the call is slow. Might be solved - easily if we implement full out-of-line guards (e.g. by invalidating - the outer function when the inner one gets compiled) - --- - More thoughts (antonio, arigo): by the way we detect traces too long, - we can always immediately compile the subfunction. If we do that then - there is no risk of generating a slow call. + traced after the outer one, the call is slow. DONE - have benchmarks for jit compile time and jit memory usage -- trace into functions even if they have a loop. only if the loop is actually - hit, a residual portal call is produced (status: kill-caninline branch, - DONE) - -- generators are not really fast ? maybe add a JUMP_ABSOLUTE_GENERATOR that - does not call can_enter_jit after an iteration in which there was a yield. - obviously. (status: unclear -- the frame of the generator cannot be a - virtual, so should we somehow make it a virtualizable? e.g. by never inlining - generators?) +- generators are not really fast. DONE, with the only restriction + that the code in generators is never inlined into some caller. - think again about perfect specialization. Check if we loose anything if we turn it off. Another approach to specialization: specialize things @@ -31,7 +18,7 @@ - kill GUARD_(NO)_EXCEPTION; replace that by LAST_EXC_VALUE to load the current exception from the struct in memory, followed by a regular - GUARD_CLASS. + GUARD_CLASS. (Armin: Look like a simplification, but it's a bit messy too) - write a document that says what you cannot expect the jit to optimize. E.g. http://paste.pocoo.org/show/181319/ with B being old-style and From arigo at codespeak.net Mon Sep 13 09:39:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 09:39:04 +0200 (CEST) Subject: [pypy-svn] r77033 - pypy/extradoc/planning Message-ID: <20100913073904.E72D4282C01@codespeak.net> Author: arigo Date: Mon Sep 13 09:39:03 2010 New Revision: 77033 Modified: pypy/extradoc/planning/jit.txt Log: Mention string manipulation examples. Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Mon Sep 13 09:39:03 2010 @@ -60,6 +60,10 @@ Extracted from some real-life Python programs, examples that don't give nice code at all so far: +- string manipulation: s[n], s[-n], s[i:j], most operations on single + chars, building a big string with repeated "s += t", "a,b=s.split()", + etc. 
+ - http://paste.pocoo.org/show/188520/ this will compile new assembler path for each new type, even though that's overspecialization since in this particular case it's not relevant. From afa at codespeak.net Mon Sep 13 11:24:34 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 11:24:34 +0200 (CEST) Subject: [pypy-svn] r77034 - pypy/branch/fast-forward/lib-python/modified-2.7.0/test Message-ID: <20100913092434.62E13282C01@codespeak.net> Author: afa Date: Mon Sep 13 11:24:30 2010 New Revision: 77034 Modified: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py Log: Copy __init__.py from modified-2.5.2, this allows both modified and unmodified tests to run. Modified: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/__init__.py Mon Sep 13 11:24:30 2010 @@ -1 +1,11 @@ -# Dummy file to make this directory a package. +""" +This package only contains the tests that we have modified for PyPy. +It uses the 'official' hack to include the rest of the standard +'test' package from CPython. + +This assumes that sys.path is configured to contain +'lib-python/modified-2.7.0' before 'lib-python/2.7.0'. +""" + +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) From arigo at codespeak.net Mon Sep 13 13:04:39 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 13:04:39 +0200 (CEST) Subject: [pypy-svn] r77036 - pypy/branch/gen2-gc Message-ID: <20100913110439.495EB282C01@codespeak.net> Author: arigo Date: Mon Sep 13 13:04:37 2010 New Revision: 77036 Added: pypy/branch/gen2-gc/ - copied from r77035, pypy/trunk/ Log: A branch in which to try to implement anogther generational GC. From arigo at codespeak.net Mon Sep 13 14:18:56 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 14:18:56 +0200 (CEST) Subject: [pypy-svn] r77038 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100913121856.0A7C1282C01@codespeak.net> Author: arigo Date: Mon Sep 13 14:18:55 2010 New Revision: 77038 Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py (contents, props changed) pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (contents, props changed) Log: Start. Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py ============================================================================== --- (empty file) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py Mon Sep 13 14:18:55 2010 @@ -0,0 +1,97 @@ +from pypy.rpython.lltypesystem import lltype, llarena +from pypy.rpython.memory.gc.base import MovingGCBase +from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.rlib.objectmodel import we_are_translated + +WORD = LONG_BIT // 8 + +first_gcflag = 1 << (LONG_BIT//2) +GCFLAG_BIG = first_gcflag + +# ____________________________________________________________ + +class Gen2GC(MovingGCBase): + _alloc_flavor_ = "raw" + inline_simple_malloc = True + inline_simple_malloc_varsize = True + malloc_zero_filled = True + + HDR = lltype.Struct('header', ('tid', lltype.Signed)) + typeid_is_in_field = 'tid' + #withhash_flag_is_in_field = 'tid', _GCFLAG_HASH_BASE * 0x2 + + TRANSLATION_PARAMS = { + # The size of the nursery. 
-1 means "auto", which means that it + # will look it up in the env var PYPY_GENERATIONGC_NURSERY and + # fall back to half the size of the L2 cache. + "nursery_size": -1, + + # The system page size. Like obmalloc.c, we assume that it is 4K, + # which is OK for most systems. + "page_size": 4096, + + # The size of an arena. Arenas are groups of pages allocated + # together. + "arena_size": 65536*WORD, + + # The maximum size of an object allocated compactly. All objects + # that are larger are just allocated with raw_malloc(). + "small_request_threshold": 32*WORD, + } + + def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, + nursery_size=32*WORD, + page_size=16*WORD, + arena_size=48*WORD, + small_request_threshold=5*WORD): + MovingGCBase.__init__(self, config, chunk_size) + self.nursery_size = nursery_size + self.page_size = page_size + self.arena_size = arena_size + self.small_request_threshold = small_request_threshold + + def setup(self): + pass + +# ____________________________________________________________ + +class Arena(object): + _alloc_flavor_ = "raw" + + def __init__(self, arena_size, page_size): + self.page_size = page_size + self.arena_size = arena_size + # 'arena_base' points to the start of malloced memory; it might not + # be a page-aligned address + self.arena_base = llarena.arena_malloc(self.arena_size, False) + if not self.arena_base: + raise MemoryError("couldn't allocate the next arena") + # 'freepages' points to the first unused page + self.freepages = start_of_page(self.arena_base + page_size - 1, + page_size) + # 'nfreepages' is the number of unused pages + arena_end = self.arena_base + self.arena_size + self.nfreepages = (arena_end - self.freepages) / page_size + +# ____________________________________________________________ +# Helpers to go from a pointer to the start of its page + +def start_of_page(addr, page_size): + """Return the address of the start of the page that contains 'addr'.""" + if we_are_translated(): + xxx + else: + return _start_of_page_untranslated(addr, page_size) + +def _start_of_page_untranslated(addr, page_size): + assert isinstance(addr, llarena.fakearenaaddress) + shift = page_size // 2 # for testing, assuming the whole arena is not + # on a page boundary + ofs = ((addr.offset - shift) & ~(page_size-1)) + shift + return llarena.fakearenaaddress(addr.arena, ofs) + +# ____________________________________________________________ + +def nursery_size_from_env(): + return read_from_env('PYPY_GENERATIONGC_NURSERY') Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py ============================================================================== --- (empty file) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Mon Sep 13 14:18:55 2010 @@ -0,0 +1,12 @@ +from pypy.rpython.memory.gc import gen2 + +def test_arena(): + SHIFT = 4 + # + a = gen2.Arena(SHIFT + 8*20, 8) + assert a.freepages == a.arena_base + SHIFT + assert a.nfreepages == 20 + # + a = gen2.Arena(SHIFT + 8*20 + 7, 8) + assert a.freepages == a.arena_base + SHIFT + assert a.nfreepages == 20 From arigo at codespeak.net Mon Sep 13 17:08:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 17:08:09 +0200 (CEST) Subject: [pypy-svn] r77043 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . 
test Message-ID: <20100913150809.310BE282C01@codespeak.net> Author: arigo Date: Mon Sep 13 17:08:07 2010 New Revision: 77043 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Log: Progress. Tests are lagging behind the code :-/ Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py Mon Sep 13 17:08:07 2010 @@ -1,10 +1,12 @@ -from pypy.rpython.lltypesystem import lltype, llarena +from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi from pypy.rpython.memory.gc.base import MovingGCBase from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import LONG_BIT from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.debug import ll_assert WORD = LONG_BIT // 8 +NULL = llmemory.NULL first_gcflag = 1 << (LONG_BIT//2) GCFLAG_BIG = first_gcflag @@ -56,6 +58,10 @@ # ____________________________________________________________ +# Terminology: Arenas are collection of pages; both are fixed-size. +# A page contains a number of allocated objects, called "blocks". + + class Arena(object): _alloc_flavor_ = "raw" @@ -67,12 +73,155 @@ self.arena_base = llarena.arena_malloc(self.arena_size, False) if not self.arena_base: raise MemoryError("couldn't allocate the next arena") - # 'freepages' points to the first unused page - self.freepages = start_of_page(self.arena_base + page_size - 1, - page_size) + # 'freepage' points to the first unused page # 'nfreepages' is the number of unused pages + self.freepage = start_of_page(self.arena_base + page_size - 1, + page_size) arena_end = self.arena_base + self.arena_size - self.nfreepages = (arena_end - self.freepages) / page_size + self.nfreepages = (arena_end - self.freepage) // page_size + self.nuninitializedpages = self.nfreepages + # + # The arenas containing at least one free page are linked in a + # doubly-linked list. We keep this chained list in order: it + # starts with the arenas with the most number of allocated + # pages, so that the least allocated arenas near the end of the + # list have a chance to become completely empty and be freed. + self.nextarena = None + self.prevarena = None + + +# Each initialized page in the arena starts with a PAGE_HEADER. The +# arena typically also contains uninitialized pages at the end. +# Similarily, each page contains blocks of a given size, which can be +# either allocated or freed, and a number of free blocks at the end of +# the page are uninitialized. The free but initialized blocks contain a +# pointer to the next free block, forming a chained list. + +PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) +PAGE_HEADER = lltype.Struct('page_header', + ('nfree', lltype.Signed), # number of free blocks in this page + ('nuninitialized', lltype.Signed), # num. 
uninitialized blocks (<= nfree) + ('freeblock', llmemory.Address), # first free block, chained list + ('prevpage', PAGE_PTR), # chained list of pages with the same size class + ) +PAGE_PTR.TO.become(PAGE_HEADER) +PAGE_NULL = lltype.nullptr(PAGE_HEADER) + + +class ArenaCollection(object): + _alloc_flavor_ = "raw" + + def __init__(self, arena_size, page_size, small_request_threshold): + self.arena_size = arena_size + self.page_size = page_size + # + # 'pageaddr_for_size': for each size N between WORD and + # small_request_threshold (included), contains either NULL or + # a pointer to a page that has room for at least one more + # allocation of the given size. + length = small_request_threshold / WORD + 1 + self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, + flavor='raw', zero=True) + self.arenas_start = None # the most allocated (but not full) arena + self.arenas_end = None # the least allocated (but not empty) arena + + + def malloc(self, size): + """Allocate a block from a page in an arena.""" + ll_assert(size > 0, "malloc: size is null or negative") + ll_assert(size <= self.small_request_threshold, "malloc: size too big") + ll_assert((size & (WORD-1)) == 0, "malloc: size is not aligned") + # + # Get the page to use from the size + size_class = size / WORD + page = self.page_for_size[size_class] + if page == PAGE_NULL: + page = self.allocate_new_page(size_class) + # + # The result is simply 'page.freeblock' + ll_assert(page.nfree > 0, "page_for_size lists a page with nfree <= 0") + result = page.freeblock + page.nfree -= 1 + if page.nfree == 0: + # + # This was the last free block, so unlink the page from the + # chained list. + self.page_for_size[size_class] = page.prevpage + # + else: + # This was not the last free block, so update 'page.freeblock' + # to point to the next free block. Two cases here... + if page.nfree < page.nuninitialized: + # The 'result' was not initialized at all. We must compute + # the next free block by adding 'size' to 'page.freeblock'. + page.freeblock = result + size + page.nuninitialized -= 1 + ll_assert(page.nfree == page.nuninitialized, + "bad value of page.nuninitialized") + else: + # The 'result' was part of the chained list; read the next. + page.freeblock = result.address[0] + # + return result + + + def allocate_new_page(self, size_class): + """Allocate a new page for the given size_class.""" + # + # Get the arena with the highest number of pages already allocated + arena = self.arenas_start + if arena is None: + # No arenas. Get a fresh new arena. + ll_assert(self.arenas_end is None, "!arenas_start && arenas_end") + arena = Arena(self.arena_size, self.page_size) + self.arenas_start = arena + self.arenas_end = arena + # + # Get the page from there (same logic as in malloc() except on + # pages instead of on blocks) + result = arena.freepage + arena.nfreepages -= 1 + if arena.nfreepages == 0: + # + # This was the last free page, so unlink the arena from the + # chained list. + self.arenas_start = arena.nextarena + if self.arenas_start is None: + self.arenas_end = None + else: + self.arenas_start.prevarena = None + # + else: + # This was not the last free page, so update 'arena.freepage' + # to point to the next free page. Two cases here... + if arena.nfreepages < arena.nuninitializedpages: + # The 'result' was not initialized at all. We must compute + # the next free page by adding 'page_size' to 'arena.freepage'. 
+ arena.freepage = result + self.page_size + arena.nuninitializedpages -= 1 + ll_assert(arena.nfreepages == arena.nuninitializedpages, + "bad value of page.nuninitialized") + else: + # The 'result' was part of the chained list; read the next. + arena.freepage = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + False) + # + # Initialize the fields of the resulting page + llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER)) + page = llmemory.cast_adr_to_ptr(result, PAGE_PTR) + # + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + page.nfree = ((self.page_size - hdrsize) / WORD) // size_class + # + page.nuninitialized = page.nfree + page.freeblock = result + hdrsize + page.prevpage = PAGE_NULL + ll_assert(self.page_for_size[size_class] == PAGE_NULL, + "allocate_new_page() called but a page is already waiting") + self.page_for_size[size_class] = page + return page # ____________________________________________________________ # Helpers to go from a pointer to the start of its page @@ -86,8 +235,8 @@ def _start_of_page_untranslated(addr, page_size): assert isinstance(addr, llarena.fakearenaaddress) - shift = page_size // 2 # for testing, assuming the whole arena is not - # on a page boundary + shift = 4 # for testing, we assume that the whole arena is not + # on a page boundary ofs = ((addr.offset - shift) & ~(page_size-1)) + shift return llarena.fakearenaaddress(addr.arena, ofs) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Mon Sep 13 17:08:07 2010 @@ -456,3 +456,7 @@ def test_varsized_from_prebuilt_gc(self): DirectGCTest.test_varsized_from_prebuilt_gc(self) test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + +class TestGen2GC(DirectGCTest): + from pypy.rpython.memory.gc.gen2 import Gen2GC as GCClass Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Mon Sep 13 17:08:07 2010 @@ -1,12 +1,157 @@ from pypy.rpython.memory.gc import gen2 +from pypy.rpython.memory.gc.gen2 import WORD, PAGE_NULL, PAGE_HEADER, PAGE_PTR +from pypy.rpython.lltypesystem import lltype, llmemory, llarena -def test_arena(): - SHIFT = 4 - # +SHIFT = 4 +hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + + +def test_allocate_arena(): a = gen2.Arena(SHIFT + 8*20, 8) - assert a.freepages == a.arena_base + SHIFT + assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 + assert a.nuninitializedpages == 20 + assert a.prevarena is None + assert a.nextarena is None # a = gen2.Arena(SHIFT + 8*20 + 7, 8) - assert a.freepages == a.arena_base + SHIFT + assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 + assert a.nuninitializedpages == 20 + assert a.prevarena is None + assert a.nextarena is None + + +def test_allocate_new_page(): + pagesize = hdrsize + 16 + arenasize = pagesize * 4 - 1 + # + def checknewpage(page, size_class): + size = WORD * size_class + assert page.nfree == (pagesize - hdrsize) // size + assert page.nuninitialized == page.nfree + page2 = page.freeblock - hdrsize + assert llmemory.cast_ptr_to_adr(page) == 
page2 + assert page.prevpage == PAGE_NULL + # + ac = gen2.ArenaCollection(arenasize, pagesize, 99) + assert ac.arenas_start is ac.arenas_end is None + # + page = ac.allocate_new_page(5) + checknewpage(page, 5) + a = ac.arenas_start + assert a is not None + assert a is ac.arenas_end + assert a.nfreepages == 2 + assert a.freepage == a.arena_base + SHIFT + pagesize + assert ac.page_for_size[5] == page + # + page = ac.allocate_new_page(3) + checknewpage(page, 3) + assert a is ac.arenas_start is ac.arenas_end + assert a.nfreepages == 1 + assert a.freepage == a.arena_base + SHIFT + 2*pagesize + assert ac.page_for_size[3] == page + # + page = ac.allocate_new_page(4) + checknewpage(page, 4) + assert ac.arenas_start is ac.arenas_end is None # has been unlinked + assert ac.page_for_size[4] == page + + +def arena_collection_for_test(pagesize, *pagelayouts): + nb_pages = len(pagelayouts[0]) + arenasize = pagesize * (nb_pages + 1) - 1 + ac = gen2.ArenaCollection(arenasize, pagesize, 9*WORD) + # + def link(pageaddr, size_class, size_block, nblocks, nusedblocks): + llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER)) + page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) + page.nfree = nblocks - nusedblocks + page.nuninitialized = page.nfree + page.freeblock = pageaddr + hdrsize + nusedblocks * size_block + page.prevpage = ac.page_for_size[size_class] + ac.page_for_size[size_class] = page + # + alist = [] + for layout in pagelayouts: + assert len(layout) == nb_pages + assert " " not in layout.rstrip(" ") + a = gen2.Arena(arenasize, pagesize) + alist.append(a) + assert lltype.typeOf(a.freepage) == llmemory.Address + startpageaddr = a.freepage + a.freepage += pagesize * min((layout + " ").index(" "), + (layout + ".").index(".")) + a.nfreepages = layout.count(" ") + layout.count(".") + a.nuninitializedpages = layout.count(" ") + # + pageaddr = startpageaddr + for i, c in enumerate(layout): + if '1' <= c <= '9': # a partially used page (1 block free) + size_class = int(c) + size_block = WORD * size_class + nblocks = (pagesize - hdrsize) // size_block + link(pageaddr, size_class, size_block, nblocks, nblocks-1) + elif c == '.': # a free, but initialized, page + next_free_num = min((layout + " ").find(" ", i+1), + (layout + ".").find(".", i+1)) + addr = startpageaddr + pagesize * next_free_num + llarena.arena_reserve(pageaddr, + llmemory.sizeof(llmemory.Address)) + pageaddr.address[0] = addr + elif c == '#': # a random full page, not in any linked list + pass + elif c == ' ': # the tail is uninitialized free pages + break + pageaddr += pagesize + # + assert alist == sorted(alist, key=lambda a: a.nfreepages) + # + ac.arenas_start = alist[0] + ac.arenas_end = alist[-1] + for a, b in zip(alist[:-1], alist[1:]): + a.nextarena = b + b.prevarena = a + return ac + + +def getarena(ac, num, total=None): + if total is not None: + a = getarena(ac, total-1) + assert a is ac.arenas_end + assert a.nextarena is None + prev = None + a = ac.arenas_start + for i in range(num): + assert a.prevarena is prev + prev = a + a = a.nextarena + return a + +def checkpage(ac, page, arena, nb_page): + pageaddr = llmemory.cast_ptr_to_adr(page) + assert pageaddr == arena.arena_base + SHIFT + nb_page * ac.page_size + + +def test_simple_arena_collection(): + # Test supposing that we have two partially-used arenas + pagesize = hdrsize + 16 + ac = arena_collection_for_test(pagesize, + "##.. 
", + ".# ") + assert ac.arenas_start.nfreepages == 3 + assert ac.arenas_end.nfreepages == 4 + # + a0 = getarena(ac, 0, 2) + a1 = getarena(ac, 1, 2) + page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2) + page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3) + assert getarena(ac, 0, 2) is a0 + page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4) + assert getarena(ac, 0, 1) is a1 + page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0) + page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2) + page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3) + page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4) + assert ac.arenas_start is ac.arenas_end is None From arigo at codespeak.net Mon Sep 13 17:29:55 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 17:29:55 +0200 (CEST) Subject: [pypy-svn] r77044 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100913152955.04784282C01@codespeak.net> Author: arigo Date: Mon Sep 13 17:29:54 2010 New Revision: 77044 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Log: More tests. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py Mon Sep 13 17:29:54 2010 @@ -114,6 +114,7 @@ def __init__(self, arena_size, page_size, small_request_threshold): self.arena_size = arena_size self.page_size = page_size + self.small_request_threshold = small_request_threshold # # 'pageaddr_for_size': for each size N between WORD and # small_request_threshold (included), contains either NULL or @@ -237,7 +238,7 @@ assert isinstance(addr, llarena.fakearenaaddress) shift = 4 # for testing, we assume that the whole arena is not # on a page boundary - ofs = ((addr.offset - shift) & ~(page_size-1)) + shift + ofs = ((addr.offset - shift) // page_size) * page_size + shift return llarena.fakearenaaddress(addr.arena, ofs) # ____________________________________________________________ Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Mon Sep 13 17:29:54 2010 @@ -143,15 +143,71 @@ assert ac.arenas_start.nfreepages == 3 assert ac.arenas_end.nfreepages == 4 # - a0 = getarena(ac, 0, 2) - a1 = getarena(ac, 1, 2) + a0 = getarena(ac, 0, total=2) + a1 = getarena(ac, 1, total=2) page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2) page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3) - assert getarena(ac, 0, 2) is a0 + assert getarena(ac, 0, total=2) is a0 page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4) - assert getarena(ac, 0, 1) is a1 + assert getarena(ac, 0, total=1) is a1 page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0) page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2) page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3) page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4) assert ac.arenas_start is ac.arenas_end is None + + +def checkobj(arena, num_page, pos_obj, obj): + pageaddr = arena.arena_base + SHIFT + num_page * arena.page_size + assert obj == pageaddr + hdrsize + pos_obj + + +def test_malloc_common_case(): + pagesize = hdrsize + 7*WORD + 
ac = arena_collection_for_test(pagesize, "#23..2 ") + a0 = getarena(ac, 0, total=1) + obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 3, 0*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 3, 2*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 3, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 6, 0*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 6, 2*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 6, 4*WORD, obj) + +def test_malloc_mixed_sizes(): + pagesize = hdrsize + 7*WORD + ac = arena_collection_for_test(pagesize, "#23..2 ") + a0 = getarena(ac, 0, total=1) + obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) + obj = ac.malloc(3*WORD); checkobj(a0, 2, 3*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) + obj = ac.malloc(3*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 3 + obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 2 + obj = ac.malloc(3*WORD); checkobj(a0, 3, 3*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj) + obj = ac.malloc(3*WORD); checkobj(a0, 6, 0*WORD, obj) # 6th page -> size 3 + obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj) + obj = ac.malloc(3*WORD); checkobj(a0, 6, 3*WORD, obj) + +def test_malloc_new_arena(): + pagesize = hdrsize + 7*WORD + ac = arena_collection_for_test(pagesize, "#23..2 ") + a0 = getarena(ac, 0, total=1) + obj = ac.malloc(5*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 5 + obj = ac.malloc(4*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 4 + obj = ac.malloc(1*WORD); checkobj(a0, 6, 0*WORD, obj) # 6th page -> size 1 + assert ac.arenas_start is ac.arenas_end is None # no more free page + obj = ac.malloc(1*WORD); checkobj(a0, 6, 1*WORD, obj) + obj = ac.malloc(5*WORD) + a1 = getarena(ac, 0, total=1) + pass; checkobj(a1, 0, 0*WORD, obj) # a1/0 -> size 5 + obj = ac.malloc(1*WORD); checkobj(a0, 6, 2*WORD, obj) + obj = ac.malloc(5*WORD); checkobj(a1, 1, 0*WORD, obj) # a1/1 -> size 5 + obj = ac.malloc(1*WORD); checkobj(a0, 6, 3*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) + obj = ac.malloc(2*WORD); checkobj(a1, 2, 0*WORD, obj) # a1/2 -> size 2 From arigo at codespeak.net Mon Sep 13 19:12:05 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 13 Sep 2010 19:12:05 +0200 (CEST) Subject: [pypy-svn] r77046 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100913171205.34EDF282C01@codespeak.net> Author: arigo Date: Mon Sep 13 19:12:03 2010 New Revision: 77046 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Log: Change Arenas to no longer be a class, but just a Struct. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py Mon Sep 13 19:12:03 2010 @@ -58,36 +58,22 @@ # ____________________________________________________________ -# Terminology: Arenas are collection of pages; both are fixed-size. +# Terminology: "Arenas" are collection of "pages"; both are fixed-size. 
# A page contains a number of allocated objects, called "blocks". -class Arena(object): - _alloc_flavor_ = "raw" - - def __init__(self, arena_size, page_size): - self.page_size = page_size - self.arena_size = arena_size - # 'arena_base' points to the start of malloced memory; it might not - # be a page-aligned address - self.arena_base = llarena.arena_malloc(self.arena_size, False) - if not self.arena_base: - raise MemoryError("couldn't allocate the next arena") - # 'freepage' points to the first unused page - # 'nfreepages' is the number of unused pages - self.freepage = start_of_page(self.arena_base + page_size - 1, - page_size) - arena_end = self.arena_base + self.arena_size - self.nfreepages = (arena_end - self.freepage) // page_size - self.nuninitializedpages = self.nfreepages - # - # The arenas containing at least one free page are linked in a - # doubly-linked list. We keep this chained list in order: it - # starts with the arenas with the most number of allocated - # pages, so that the least allocated arenas near the end of the - # list have a chance to become completely empty and be freed. - self.nextarena = None - self.prevarena = None +ARENA_PTR = lltype.Ptr(lltype.ForwardReference()) +ARENA = lltype.Struct('Arena', + ('arena_base', llmemory.Address), # see allocate_arena() for a description + ('freepage', llmemory.Address), + ('nfreepages', lltype.Signed), + ('nuninitializedpages', lltype.Signed), + ('nextarena', ARENA_PTR), + ('prevarena', ARENA_PTR), + ('arena_index', lltype.Signed), + ) +ARENA_PTR.TO.become(ARENA) +ARENA_NULL = lltype.nullptr(ARENA) # Each initialized page in the arena starts with a PAGE_HEADER. The @@ -98,16 +84,62 @@ # pointer to the next free block, forming a chained list. PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) -PAGE_HEADER = lltype.Struct('page_header', +PAGE_HEADER = lltype.Struct('PageHeader', + ('nextpage', PAGE_PTR), # chained list of pages with the same size class + ('prevpage', PAGE_PTR), # "", but not initialized for the head of list! ('nfree', lltype.Signed), # number of free blocks in this page ('nuninitialized', lltype.Signed), # num. 
uninitialized blocks (<= nfree) ('freeblock', llmemory.Address), # first free block, chained list - ('prevpage', PAGE_PTR), # chained list of pages with the same size class + ('arena_index', lltype.Signed), # index of the arena in 'all_arenas' ) PAGE_PTR.TO.become(PAGE_HEADER) PAGE_NULL = lltype.nullptr(PAGE_HEADER) +def allocate_arena(arena_size, page_size): + # 'arena_base' points to the start of malloced memory; it might not + # be a page-aligned address + arena_base = llarena.arena_malloc(arena_size, False) + if not arena_base: + raise MemoryError("couldn't allocate the next arena") + # + # 'freepage' points to the first unused page + freepage = start_of_page(arena_base + page_size - 1, page_size) + # + # we stick the ARENA structure either at the start or at the end + # of the big arena, depending on alignment of the malloc'ed memory + arena_end = arena_base + arena_size + struct_size = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA)) + if freepage - arena_base >= struct_size: + arena_addr = arena_base + else: + arena_end -= struct_size + arena_addr = arena_end + # + llarena.arena_reserve(arena_addr, llmemory.sizeof(ARENA), False) + arena = llmemory.cast_adr_to_ptr(arena_addr, ARENA_PTR) + # + arena.arena_base = arena_base + arena.freepage = freepage + # 'nfreepages' is the number of unused pages + arena.nfreepages = (arena_end - freepage) // page_size + arena.nuninitializedpages = arena.nfreepages + # + # The arenas containing at least one free page are linked in a + # doubly-linked list. We keep this chained list in order: it + # starts with the arenas with the most number of allocated + # pages, so that the least allocated arenas near the end of the + # list have a chance to become completely empty and be freed. + arena.nextarena = ARENA_NULL + arena.prevarena = ARENA_NULL + return arena + +def free_arena(arena): + llarena.arena_free(arena.arena_base) + +# ____________________________________________________________ + + class ArenaCollection(object): _alloc_flavor_ = "raw" @@ -123,8 +155,19 @@ length = small_request_threshold / WORD + 1 self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, flavor='raw', zero=True) - self.arenas_start = None # the most allocated (but not full) arena - self.arenas_end = None # the least allocated (but not empty) arena + self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), + length, flavor='raw') + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + for i in range(1, length): + self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i) + # + self.arenas_start = ARENA_NULL #the most allocated (but not full) arena + self.arenas_end = ARENA_NULL #the least allocated(but not empty)arena + # + self.all_arenas = lltype.malloc(rffi.CArray(ARENA_PTR), 0, + flavor='raw') + self.all_arenas_size = 0 + self.all_arenas_next = 0 def malloc(self, size): @@ -147,7 +190,7 @@ # # This was the last free block, so unlink the page from the # chained list. - self.page_for_size[size_class] = page.prevpage + self.page_for_size[size_class] = page.nextpage # else: # This was not the last free block, so update 'page.freeblock' @@ -162,7 +205,11 @@ else: # The 'result' was part of the chained list; read the next. 
page.freeblock = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + False) # + llarena.arena_reserve(result, _dummy_size(size), False) return result @@ -171,12 +218,11 @@ # # Get the arena with the highest number of pages already allocated arena = self.arenas_start - if arena is None: + if arena == ARENA_NULL: # No arenas. Get a fresh new arena. - ll_assert(self.arenas_end is None, "!arenas_start && arenas_end") - arena = Arena(self.arena_size, self.page_size) - self.arenas_start = arena - self.arenas_end = arena + ll_assert(self.arenas_end == ARENA_NULL, + "!arenas_start && arenas_end") + arena = self.allocate_new_arena() # # Get the page from there (same logic as in malloc() except on # pages instead of on blocks) @@ -187,10 +233,10 @@ # This was the last free page, so unlink the arena from the # chained list. self.arenas_start = arena.nextarena - if self.arenas_start is None: - self.arenas_end = None + if self.arenas_start == ARENA_NULL: + self.arenas_end = ARENA_NULL else: - self.arenas_start.prevarena = None + self.arenas_start.prevarena = ARENA_NULL # else: # This was not the last free page, so update 'arena.freepage' @@ -201,7 +247,7 @@ arena.freepage = result + self.page_size arena.nuninitializedpages -= 1 ll_assert(arena.nfreepages == arena.nuninitializedpages, - "bad value of page.nuninitialized") + "bad value of arena.nuninitializedpages") else: # The 'result' was part of the chained list; read the next. arena.freepage = result.address[0] @@ -214,16 +260,104 @@ page = llmemory.cast_adr_to_ptr(result, PAGE_PTR) # hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - page.nfree = ((self.page_size - hdrsize) / WORD) // size_class - # + page.nfree = self.nblocks_for_size[size_class] page.nuninitialized = page.nfree page.freeblock = result + hdrsize - page.prevpage = PAGE_NULL + page.nextpage = PAGE_NULL ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") self.page_for_size[size_class] = page return page + + def allocate_new_arena(self): + arena = allocate_arena(self.arena_size, self.page_size) + self.arenas_start = arena + self.arenas_end = arena + # + # Search the next free entry in the 'all_arenas' array + i = self.all_arenas_next + size = self.all_arenas_size + count = size + while count > 0: + if self.all_arenas[i] == ARENA_NULL: + break # 'i' is the free entry + count -= 1 + i += 1 + if i == size: + i = 0 + else: + # + # No more free entry. Resize the array to get some space. 
+ newsize = (size + 3) * 2 + copy = lltype.malloc(rffi.CArray(ARENA_PTR), newsize, + flavor='raw', zero=True) + i = 0 + while i < size: + copy[i] = self.all_arenas[i] + i += 1 + # 'i' is equal to the old 'size', so it's now a free entry + lltype.free(self.all_arenas, flavor='raw') + self.all_arenas = copy + self.all_arenas_size = newsize + # + self.all_arenas_next = i + arena.arena_index = i + self.all_arenas[i] = arena + return arena + + + def free(self, obj, size): + """Free a previously malloc'ed block.""" + ll_assert(size > 0, "free: size is null or negative") + ll_assert(size <= self.small_request_threshold, "free: size too big") + ll_assert((size & (WORD-1)) == 0, "free: size is not aligned") + # + llarena.arena_reset(obj, _dummy_size(size), False) + pageaddr = start_of_page(obj, self.page_size) + if not we_are_translated(): + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + assert obj - pageaddr >= hdrsize + assert (obj - pageaddr - hdrsize) % size == 0 + page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) + size_class = size / WORD + # + # Increment the number of known free objects + nfree = page.nfree + 1 + if nfree < self.nblocks_for_size[size_class]: + # + # Not all objects in this page are freed yet. + # Add the free block to the chained list. + page.nfree = nfree + llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address), + False) + obj.address[0] = page.freeblock + page.freeblock = obj + # + # If the page was full, then it now has space and should be + # linked back in the page_for_size[] linked list. + if nfree == 1: + page.nextpage = self.page_for_size[size_class] + if page.nextpage != PAGE_NULL: + page.nextpage.prevpage = page + self.page_for_size[size_class] = page + # + else: + # The page becomes completely free. Remove it from + # the page_for_size[] linked list. + if page == self.page_for_size[size_class]: + self.page_for_size[size_class] = page.nextpage + else: + prev = page.prevpage + next = page.nextpage + prev.nextpage = next + next.prevpage = prev + # + # Free the page, putting it back in the chained list of the arena + # where it belongs + xxx#... 
+ + # ____________________________________________________________ # Helpers to go from a pointer to the start of its page @@ -241,6 +375,13 @@ ofs = ((addr.offset - shift) // page_size) * page_size + shift return llarena.fakearenaaddress(addr.arena, ofs) +def _dummy_size(size): + if we_are_translated(): + return size + if isinstance(size, int): + size = llmemory.sizeof(lltype.Char) * size + return size + # ____________________________________________________________ def nursery_size_from_env(): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Mon Sep 13 19:12:03 2010 @@ -1,25 +1,27 @@ from pypy.rpython.memory.gc import gen2 from pypy.rpython.memory.gc.gen2 import WORD, PAGE_NULL, PAGE_HEADER, PAGE_PTR +from pypy.rpython.memory.gc.gen2 import ARENA, ARENA_NULL from pypy.rpython.lltypesystem import lltype, llmemory, llarena SHIFT = 4 hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) +arenasize = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA)) def test_allocate_arena(): - a = gen2.Arena(SHIFT + 8*20, 8) + a = gen2.allocate_arena(SHIFT + 8*20 + arenasize, 8) assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 assert a.nuninitializedpages == 20 - assert a.prevarena is None - assert a.nextarena is None + assert a.prevarena == ARENA_NULL + assert a.nextarena == ARENA_NULL # - a = gen2.Arena(SHIFT + 8*20 + 7, 8) + a = gen2.allocate_arena(SHIFT + 8*20 + 7 + arenasize, 8) assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 assert a.nuninitializedpages == 20 - assert a.prevarena is None - assert a.nextarena is None + assert a.prevarena == ARENA_NULL + assert a.nextarena == ARENA_NULL def test_allocate_new_page(): @@ -32,30 +34,30 @@ assert page.nuninitialized == page.nfree page2 = page.freeblock - hdrsize assert llmemory.cast_ptr_to_adr(page) == page2 - assert page.prevpage == PAGE_NULL + assert page.nextpage == PAGE_NULL # ac = gen2.ArenaCollection(arenasize, pagesize, 99) - assert ac.arenas_start is ac.arenas_end is None + assert ac.arenas_start == ac.arenas_end == ARENA_NULL # page = ac.allocate_new_page(5) checknewpage(page, 5) a = ac.arenas_start - assert a is not None - assert a is ac.arenas_end + assert a != ARENA_NULL + assert a == ac.arenas_end assert a.nfreepages == 2 assert a.freepage == a.arena_base + SHIFT + pagesize assert ac.page_for_size[5] == page # page = ac.allocate_new_page(3) checknewpage(page, 3) - assert a is ac.arenas_start is ac.arenas_end + assert a == ac.arenas_start == ac.arenas_end assert a.nfreepages == 1 assert a.freepage == a.arena_base + SHIFT + 2*pagesize assert ac.page_for_size[3] == page # page = ac.allocate_new_page(4) checknewpage(page, 4) - assert ac.arenas_start is ac.arenas_end is None # has been unlinked + assert ac.arenas_start == ac.arenas_end == ARENA_NULL # has been unlinked assert ac.page_for_size[4] == page @@ -70,14 +72,16 @@ page.nfree = nblocks - nusedblocks page.nuninitialized = page.nfree page.freeblock = pageaddr + hdrsize + nusedblocks * size_block - page.prevpage = ac.page_for_size[size_class] + page.nextpage = ac.page_for_size[size_class] ac.page_for_size[size_class] = page + if page.nextpage: + page.nextpage.prevpage = page # alist = [] for layout in pagelayouts: assert len(layout) == nb_pages assert " " not in layout.rstrip(" ") - a = 
gen2.Arena(arenasize, pagesize) + a = gen2.allocate_arena(arenasize, pagesize) alist.append(a) assert lltype.typeOf(a.freepage) == llmemory.Address startpageaddr = a.freepage @@ -119,12 +123,12 @@ def getarena(ac, num, total=None): if total is not None: a = getarena(ac, total-1) - assert a is ac.arenas_end - assert a.nextarena is None - prev = None + assert a == ac.arenas_end + assert a.nextarena == ARENA_NULL + prev = ARENA_NULL a = ac.arenas_start for i in range(num): - assert a.prevarena is prev + assert a.prevarena == prev prev = a a = a.nextarena return a @@ -147,18 +151,18 @@ a1 = getarena(ac, 1, total=2) page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2) page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3) - assert getarena(ac, 0, total=2) is a0 + assert getarena(ac, 0, total=2) == a0 page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4) - assert getarena(ac, 0, total=1) is a1 + assert getarena(ac, 0, total=1) == a1 page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0) page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2) page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3) page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4) - assert ac.arenas_start is ac.arenas_end is None + assert ac.arenas_start == ac.arenas_end == ARENA_NULL -def checkobj(arena, num_page, pos_obj, obj): - pageaddr = arena.arena_base + SHIFT + num_page * arena.page_size +def ckob(ac, arena, num_page, pos_obj, obj): + pageaddr = arena.arena_base + SHIFT + num_page * ac.page_size assert obj == pageaddr + hdrsize + pos_obj @@ -166,48 +170,48 @@ pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") a0 = getarena(ac, 0, total=1) - obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 3, 0*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 3, 2*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 3, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 6, 0*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 6, 2*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 6, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 0*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 2*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 0*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 2*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 4*WORD, obj) def test_malloc_mixed_sizes(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") a0 = getarena(ac, 0, total=1) - obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) - obj = ac.malloc(3*WORD); checkobj(a0, 2, 3*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) - obj = ac.malloc(3*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 3 - obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 2 - obj = ac.malloc(3*WORD); checkobj(a0, 3, 3*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj) - obj = ac.malloc(3*WORD); checkobj(a0, 6, 0*WORD, 
obj) # 6th page -> size 3 - obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj) - obj = ac.malloc(3*WORD); checkobj(a0, 6, 3*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) + obj = ac.malloc(3*WORD); ckob(ac, a0, 2, 3*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) + obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 3 + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 2 + obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 3*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj) + obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 3 + obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj) + obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 3*WORD, obj) def test_malloc_new_arena(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") a0 = getarena(ac, 0, total=1) - obj = ac.malloc(5*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 5 - obj = ac.malloc(4*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 4 - obj = ac.malloc(1*WORD); checkobj(a0, 6, 0*WORD, obj) # 6th page -> size 1 - assert ac.arenas_start is ac.arenas_end is None # no more free page - obj = ac.malloc(1*WORD); checkobj(a0, 6, 1*WORD, obj) + obj = ac.malloc(5*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 5 + obj = ac.malloc(4*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 4 + obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 1 + assert ac.arenas_start == ac.arenas_end == ARENA_NULL # no more free page + obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 1*WORD, obj) obj = ac.malloc(5*WORD) a1 = getarena(ac, 0, total=1) - pass; checkobj(a1, 0, 0*WORD, obj) # a1/0 -> size 5 - obj = ac.malloc(1*WORD); checkobj(a0, 6, 2*WORD, obj) - obj = ac.malloc(5*WORD); checkobj(a1, 1, 0*WORD, obj) # a1/1 -> size 5 - obj = ac.malloc(1*WORD); checkobj(a0, 6, 3*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj) - obj = ac.malloc(2*WORD); checkobj(a1, 2, 0*WORD, obj) # a1/2 -> size 2 + pass; ckob(ac, a1, 0, 0*WORD, obj) # a1/0 -> size 5 + obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 2*WORD, obj) + obj = ac.malloc(5*WORD); ckob(ac, a1, 1, 0*WORD, obj) # a1/1 -> size 5 + obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 3*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) + obj = ac.malloc(2*WORD); ckob(ac, a1, 2, 0*WORD, obj) # a1/2 -> size 2 From agaynor at codespeak.net Mon Sep 13 19:27:13 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Mon, 13 Sep 2010 19:27:13 +0200 (CEST) Subject: [pypy-svn] r77047 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100913172713.C8C39282C01@codespeak.net> Author: agaynor Date: Mon Sep 13 19:27:12 2010 New Revision: 77047 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Log: Don't use relative imports. 
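For context on the one-line fix below: the patch replaces a Python 2 implicit relative import inside the optimizeopt package with a fully qualified one. Schematically (the module path and imported names are exactly those visible in the diff; nothing else is assumed):

    # before: implicit relative import, resolved against the
    # pypy/jit/metainterp/optimizeopt/ package directory
    from intutils import IntBound, IntUnbounded

    # after: fully qualified absolute import
    from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded
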
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Mon Sep 13 19:27:12 2010 @@ -1,6 +1,6 @@ from optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.optimizeutil import _findall -from intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded from pypy.jit.metainterp.history import Const, ConstInt from pypy.jit.metainterp.resoperation import rop, ResOperation From afa at codespeak.net Mon Sep 13 23:11:02 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 23:11:02 +0200 (CEST) Subject: [pypy-svn] r77049 - in pypy/branch/fast-forward/lib-python: . modified-2.7.0 modified-2.7.0/test Message-ID: <20100913211102.EF253282BD6@codespeak.net> Author: afa Date: Mon Sep 13 23:10:59 2010 New Revision: 77049 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py - copied, changed from r77048, pypy/branch/fast-forward/lib-python/2.7.0/opcode.py pypy/branch/fast-forward/lib-python/modified-2.7.0/sysconfig.py - copied, changed from r77048, pypy/branch/fast-forward/lib-python/2.7.0/sysconfig.py pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py - copied, changed from r77048, pypy/branch/fast-forward/lib-python/2.7.0/test/regrtest.py pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_support.py - copied unchanged from r77048, pypy/branch/fast-forward/lib-python/2.7.0/test/test_support.py pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py - copied, changed from r77048, pypy/branch/fast-forward/lib-python/2.7.0/test/test_sys.py Modified: pypy/branch/fast-forward/lib-python/ (props changed) Log: Import the CPython 2.7.0 library. a "svn:externals" is not practical here, since there is no way to track and merge the eventual changes into modified-2.7.0 Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py (from r77048, pypy/branch/fast-forward/lib-python/2.7.0/opcode.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/opcode.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py Mon Sep 13 23:10:59 2010 @@ -189,4 +189,9 @@ def_op('SET_ADD', 146) def_op('MAP_ADD', 147) +# pypy modification, experimental bytecode +def_op('CALL_LIKELY_BUILTIN', 200) # #args + (#kwargs << 8) +def_op('LOOKUP_METHOD', 201) # Index in name list +def_op('CALL_METHOD', 202) # #args not including 'self' + del def_op, name_op, jrel_op, jabs_op Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/sysconfig.py (from r77048, pypy/branch/fast-forward/lib-python/2.7.0/sysconfig.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/sysconfig.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/sysconfig.py Mon Sep 13 23:10:59 2010 @@ -175,135 +175,9 @@ return env_base if env_base else joinuser("~", ".local") -def _parse_makefile(filename, vars=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - import re - # Regexes needed for parsing Makefile (and similar syntaxes, - # like old-style Setup files). 
- _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") - _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") - _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - - if vars is None: - vars = {} - done = {} - notdone = {} - - with open(filename) as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - while notdone: - for name in notdone.keys(): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - else: - done[n] = item = "" - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - del notdone[name] - else: - # bogus variable reference; just drop it since we can't deal - del notdone[name] - # save the results in the global dictionary - vars.update(done) - return vars - - -def _get_makefile_filename(): - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - return os.path.join(get_path('stdlib'), "config", "Makefile") - - def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" - # load the installed Makefile: - makefile = _get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except IOError, e: - msg = "invalid Python installation: unable to open %s" % makefile - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - parse_config_h(open(config_h), vars) - except IOError, e: - msg = "invalid Python installation: unable to open %s" % config_h - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - - # On MacOSX we need to check the setting of the environment variable - # MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so - # it needs to be compatible. - # If it isn't set we set it to the configure-time value - if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in vars: - cfg_target = vars['MACOSX_DEPLOYMENT_TARGET'] - cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '') - if cur_target == '': - cur_target = cfg_target - os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target) - elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')): - msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" ' - 'during configure' % (cur_target, cfg_target)) - raise IOError(msg) - - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. 
- if _PYTHON_BUILD: - vars['LDSHARED'] = vars['BLDSHARED'] + return def _init_non_posix(vars): """Initialize the module as appropriate for NT""" Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py (from r77048, pypy/branch/fast-forward/lib-python/2.7.0/test/regrtest.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/test/regrtest.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/regrtest.py Mon Sep 13 23:10:59 2010 @@ -682,8 +682,13 @@ def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS): """Return a list of all applicable test modules.""" - testdir = findtestdir(testdir) - names = os.listdir(testdir) + if testdir: + testdirs = [testdir] + else: + testdirs = findtestdirs() + names = {} + for testdir in testdirs: + names.update(dict.fromkeys(os.listdir(testdir))) tests = [] others = set(stdtests) | nottests for name in names: @@ -850,7 +855,6 @@ def runtest_inner(test, verbose, quiet, testdir=None, huntrleaks=False): test_support.unload(test) - testdir = findtestdir(testdir) if verbose: capture_stdout = None else: @@ -1079,8 +1083,19 @@ # Collect cyclic trash. gc.collect() -def findtestdir(path=None): - return path or os.path.dirname(__file__) or os.curdir +def findtestdirs(): + # XXX hacking: returns a list of both the '2.7.0/test' and the + # 'modified-2.7.0/test' directories, as full paths. + testdir = os.path.abspath(os.path.dirname(__file__) or os.curdir) + assert os.path.basename(testdir).lower() == 'test' + maindir = os.path.dirname(testdir) + libpythondir = os.path.dirname(maindir) + maindirname = os.path.basename(maindir).lower() + if maindirname.startswith('modified-'): + maindirname = maindirname[len('modified-'):] + testdir1 = os.path.join(libpythondir, maindirname, 'test') + testdir2 = os.path.join(libpythondir, 'modified-'+maindirname, 'test') + return [testdir1, testdir2] def removepy(names): if not names: @@ -1501,13 +1516,7 @@ return self.expected if __name__ == '__main__': - # findtestdir() gets the dirname out of __file__, so we have to make it - # absolute before changing the working directory. - # For example __file__ may be relative when running trace or profile. - # See issue #9323. - __file__ = os.path.abspath(__file__) - - # sanity check + # Simplification for findtestdir(). assert __file__ == os.path.abspath(sys.argv[0]) # When tests are run from the Python build directory, it is best practice Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py (from r77048, pypy/branch/fast-forward/lib-python/2.7.0/test/test_sys.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/test/test_sys.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_sys.py Mon Sep 13 23:10:59 2010 @@ -264,6 +264,7 @@ self.assertEqual(sys.getdlopenflags(), oldflags+1) sys.setdlopenflags(oldflags) + @test.test_support.impl_detail("reference counting") def test_refcount(self): # n here must be a global in order for this test to pass while # tracing with a python function. Tracing calls PyFrame_FastToLocals @@ -287,7 +288,7 @@ is sys._getframe().f_code ) - # sys._current_frames() is a CPython-only gimmick. 
+ @test.test_support.impl_detail("current_frames") def test_current_frames(self): have_threads = True try: From afa at codespeak.net Mon Sep 13 23:19:16 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 23:19:16 +0200 (CEST) Subject: [pypy-svn] r77050 - pypy/branch/fast-forward/lib-python Message-ID: <20100913211916.83A46282BD6@codespeak.net> Author: afa Date: Mon Sep 13 23:19:14 2010 New Revision: 77050 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: List recently added test Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Mon Sep 13 23:19:14 2010 @@ -465,6 +465,7 @@ RegrTest('test_ttk_guionly.py'), RegrTest('test_ttk_textonly.py'), RegrTest('test_tokenize.py'), + RegrTest('test_trace.py'), RegrTest('test_traceback.py', core=True), RegrTest('test_transformer.py', core=True), RegrTest('test_tuple.py', core=True), From afa at codespeak.net Mon Sep 13 23:32:04 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 13 Sep 2010 23:32:04 +0200 (CEST) Subject: [pypy-svn] r77051 - in pypy/branch/fast-forward/pypy: interpreter module/sys Message-ID: <20100913213204.16C56282BD6@codespeak.net> Author: afa Date: Mon Sep 13 23:32:03 2010 New Revision: 77051 Modified: pypy/branch/fast-forward/pypy/interpreter/executioncontext.py pypy/branch/fast-forward/pypy/module/sys/__init__.py pypy/branch/fast-forward/pypy/module/sys/vm.py Log: Implement sys.getprofile() Modified: pypy/branch/fast-forward/pypy/interpreter/executioncontext.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/executioncontext.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/executioncontext.py Mon Sep 13 23:32:03 2010 @@ -218,6 +218,9 @@ else: self.setllprofile(app_profile_call, w_func) + def getprofile(self): + return self.w_profilefuncarg + def setllprofile(self, func, w_arg): if func is not None: if w_arg is None: Modified: pypy/branch/fast-forward/pypy/module/sys/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/__init__.py Mon Sep 13 23:32:03 2010 @@ -51,6 +51,7 @@ 'exc_clear' : 'vm.exc_clear', 'settrace' : 'vm.settrace', 'setprofile' : 'vm.setprofile', + 'getprofile' : 'vm.getprofile', 'call_tracing' : 'vm.call_tracing', 'executable' : 'space.wrap("py.py")', Modified: pypy/branch/fast-forward/pypy/module/sys/vm.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/vm.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/vm.py Mon Sep 13 23:32:03 2010 @@ -112,6 +112,15 @@ and return. See the profiler chapter in the library manual.""" space.getexecutioncontext().setprofile(w_func) +def getprofile(space): + """Set the profiling function. It will be called on each function call +and return. See the profiler chapter in the library manual.""" + w_func = space.getexecutioncontext().getprofile() + if w_func is not None: + return w_func + else: + return space.w_None + def call_tracing(space, w_func, w_args): """Call func(*args), while tracing is enabled. The tracing state is saved, and restored afterwards. 
This is intended to be called from From dan at codespeak.net Tue Sep 14 03:15:01 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Tue, 14 Sep 2010 03:15:01 +0200 (CEST) Subject: [pypy-svn] r77052 - pypy/branch/micronumpy-ElectronicRU Message-ID: <20100914011501.677A0282BF1@codespeak.net> Author: dan Date: Tue Sep 14 03:14:59 2010 New Revision: 77052 Removed: pypy/branch/micronumpy-ElectronicRU/ Log: Hasn't been touched. From dan at codespeak.net Tue Sep 14 03:16:19 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Tue, 14 Sep 2010 03:16:19 +0200 (CEST) Subject: [pypy-svn] r77053 - pypy/branch/interplevel-ctypes Message-ID: <20100914011619.24B5C282BF1@codespeak.net> Author: dan Date: Tue Sep 14 03:16:17 2010 New Revision: 77053 Removed: pypy/branch/interplevel-ctypes/ Log: I never touched this, getxsick has done good work on this in another branch. From dan at codespeak.net Tue Sep 14 03:18:28 2010 From: dan at codespeak.net (dan at codespeak.net) Date: Tue, 14 Sep 2010 03:18:28 +0200 (CEST) Subject: [pypy-svn] r77054 - pypy/branch/micronumpy-resync Message-ID: <20100914011828.C15EF282BF1@codespeak.net> Author: dan Date: Tue Sep 14 03:18:27 2010 New Revision: 77054 Added: pypy/branch/micronumpy-resync/ (props changed) - copied from r77053, pypy/trunk/ Log: Going to rebase micronumpy changes into this then replace the original micronumpy branch. From afa at codespeak.net Tue Sep 14 09:00:10 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 09:00:10 +0200 (CEST) Subject: [pypy-svn] r77055 - in pypy/branch/fast-forward/pypy/interpreter/pyparser: . test Message-ID: <20100914070010.A9A09282C01@codespeak.net> Author: afa Date: Tue Sep 14 09:00:07 2010 New Revision: 77055 Modified: pypy/branch/fast-forward/pypy/interpreter/pyparser/pytokenize.py pypy/branch/fast-forward/pypy/interpreter/pyparser/test/test_pyparse.py Log: Fix test (with and as are now keywords), and update tokenizer to parse bytes literals Modified: pypy/branch/fast-forward/pypy/interpreter/pyparser/pytokenize.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pyparser/pytokenize.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pyparser/pytokenize.py Tue Sep 14 09:00:07 2010 @@ -265,9 +265,11 @@ "r" : None, "R" : None, "u" : None, - "U" : None} + "U" : None, + "b" : None, + "B" : None} -for uniPrefix in ("", "u", "U"): +for uniPrefix in ("", "u", "U", "b", "B"): for rawPrefix in ("", "r", "R"): prefix = uniPrefix + rawPrefix endDFAs[prefix + "'''"] = single3DFA Modified: pypy/branch/fast-forward/pypy/interpreter/pyparser/test/test_pyparse.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pyparser/test/test_pyparse.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pyparser/test/test_pyparse.py Tue Sep 14 09:00:07 2010 @@ -16,9 +16,9 @@ info = pyparse.CompileInfo("", mode) return self.parser.parse_source(source, info) - def test_with_and_as_no_future(self): - self.parse("with = 23") - self.parse("as = 2") + def test_with_and_as(self): + py.test.raises(SyntaxError, self.parse, "with = 23") + py.test.raises(SyntaxError, self.parse, "as = 2") def test_dont_imply_dedent(self): info = pyparse.CompileInfo("", "single", @@ -99,3 +99,12 @@ py.test.raises(SyntaxError, self.parse, "x = 54", "eval") tree = self.parse("x = 43", "single") assert tree.type == syms.single_input + + def test_bytes_literal(self): + 
self.parse('b" "') + self.parse('br" "') + self.parse('b""" """') + self.parse("b''' '''") + self.parse("br'\\\n'") + + py.test.raises(SyntaxError, self.parse, "b'a\\n") From afa at codespeak.net Tue Sep 14 10:17:12 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 10:17:12 +0200 (CEST) Subject: [pypy-svn] r77056 - pypy/branch/fast-forward/lib-python Message-ID: <20100914081712.14EB6282C01@codespeak.net> Author: afa Date: Tue Sep 14 10:17:11 2010 New Revision: 77056 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: cpython 2.7 tests have no output directory. Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Tue Sep 14 10:17:11 2010 @@ -86,27 +86,7 @@ fn = regrtestdir.join(self.basename) return fn - def getoutputpath(self): - p = modregrtestdir.join('output', self.basename).new(ext='') - if p.check(file=1): - return p - p = regrtestdir.join('output', self.basename).new(ext='') - if p.check(file=1): - return p - - def _prepare(self, space): - # output tests sometimes depend on not running in - # verbose mode - if not hasattr(self, '_prepared'): - if self.getoutputpath(): - space.appexec([], """(): - from test import test_support - test_support.verbose = False - """) - self._prepared = True - def run_file(self, space): - self._prepare(space) fspath = self.getfspath() assert fspath.check() modname = fspath.purebasename @@ -632,14 +612,8 @@ regr_script = pypydir.join('tool', 'pytest', 'run-script', 'regrverbose.py') - # we use the regrverbose script to run the test, but don't get - # confused: it still doesn't set verbose to True by default if - # regrtest.outputpath() is true, because output tests get confused - # in verbose mode. You can always force verbose mode by passing - # the -v option to py.test. The regrverbose script contains the - # logic that CPython uses in its regrtest.py. regrrun = str(regr_script) - if not regrtest.getoutputpath() or pypy_option.verbose: + if pypy_option.verbose: regrrun_verbosity = '1' else: regrrun_verbosity = '0' @@ -735,20 +709,10 @@ if test_stderr.rfind(26*"=" + "skipped" + 26*"=") != -1: skipped = True outcome = 'OK' - expectedpath = regrtest.getoutputpath() if not exit_status: - if expectedpath is not None: - expected = expectedpath.read(mode='rU') - test_stdout = "%s\n%s" % (self.fspath.purebasename, test_stdout) - if test_stdout != expected: - exit_status = 2 - res, out, err = py.io.StdCapture.call(reportdiff, expected, test_stdout) - outcome = 'ERROUT' - test_stderr += ("-" * 80 + "\n") + out - else: - if 'FAIL' in test_stdout or re.search('[^:]ERROR', test_stderr): - outcome = 'FAIL' - exit_status = 2 + if 'FAIL' in test_stdout or re.search('[^:]ERROR', test_stderr): + outcome = 'FAIL' + exit_status = 2 elif timedout: outcome = "T/O" else: @@ -763,49 +727,6 @@ lst.append('core') return lst -# test.regrtest.reportdiff was deleted in CPython2.6 -def reportdiff(expected, output): - import difflib - print "*" * 70 - a = expected.splitlines(1) - b = output.splitlines(1) - sm = difflib.SequenceMatcher(a=a, b=b) - tuples = sm.get_opcodes() - - def pair(x0, x1): - # x0:x1 are 0-based slice indices; convert to 1-based line indices. 
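The module renamed here is the arena/page allocator developed in r77043-r77046 above. As a reading aid only -- this is a hypothetical, much-simplified pure-Python model, not the RPython code that lives in minimark.py, and it ignores arenas, raw addresses and the lazy "nuninitialized" bookkeeping -- the size-class scheme it implements works roughly like this:

    WORD = 8   # toy word size; the real code derives it from LONG_BIT

    class Page(object):
        # one fixed block size per page, determined by its size class
        def __init__(self, page_size, size_class, hdr_size=4 * WORD):
            self.block_size = size_class * WORD
            self.nblocks = (page_size - hdr_size) // self.block_size
            self.freeblocks = list(range(self.nblocks))   # indices of free blocks

    class ToyArenaCollection(object):
        def __init__(self, page_size, small_request_threshold):
            self.page_size = page_size
            self.small_request_threshold = small_request_threshold
            # one list of not-yet-full pages per size class
            # (the real code keeps a chained list in page_for_size[])
            self.pages_for_size = {}

        def malloc(self, size):
            assert 0 < size <= self.small_request_threshold
            assert size % WORD == 0
            size_class = size // WORD
            pages = self.pages_for_size.setdefault(size_class, [])
            if not pages:
                pages.append(Page(self.page_size, size_class))
            page = pages[-1]
            block = page.freeblocks.pop()
            if not page.freeblocks:        # page became full: unlink it
                pages.pop()
            return (page, block)

        def free(self, result):
            page, block = result
            was_full = not page.freeblocks
            page.freeblocks.append(block)
            if was_full:                   # page has room again: relink it
                size_class = page.block_size // WORD
                self.pages_for_size.setdefault(size_class, []).append(page)

With, say, ToyArenaCollection(page_size=8*WORD, small_request_threshold=5*WORD), two malloc(2*WORD) calls are served from the same page and the third one opens a fresh page; this is the kind of behaviour that the tests above (test_malloc_common_case and friends) pin down against the real implementation.
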
- x0 += 1 - if x0 >= x1: - return "line " + str(x0) - else: - return "lines %d-%d" % (x0, x1) - - for op, a0, a1, b0, b1 in tuples: - if op == 'equal': - pass - - elif op == 'delete': - print "***", pair(a0, a1), "of expected output missing:" - for line in a[a0:a1]: - print "-", line, - - elif op == 'replace': - print "*** mismatch between", pair(a0, a1), "of expected", \ - "output and", pair(b0, b1), "of actual output:" - for line in difflib.ndiff(a[a0:a1], b[b0:b1]): - print line, - - elif op == 'insert': - print "***", pair(b0, b1), "of actual output doesn't appear", \ - "in expected output after line", str(a1)+":" - for line in b[b0:b1]: - print "+", line, - - else: - print "get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1) - - print "*" * 70 - # # Sanity check (could be done more nicely too) # From afa at codespeak.net Tue Sep 14 10:17:43 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 10:17:43 +0200 (CEST) Subject: [pypy-svn] r77057 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100914081743.9A996282C01@codespeak.net> Author: afa Date: Tue Sep 14 10:17:42 2010 New Revision: 77057 Modified: pypy/branch/fast-forward/pypy/objspace/std/stringtype.py Log: Add keyword arguments to str.encode and str.decode Modified: pypy/branch/fast-forward/pypy/objspace/std/stringtype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/stringtype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/stringtype.py Tue Sep 14 10:17:42 2010 @@ -252,6 +252,7 @@ ' mapped through the given\ntranslation table, which' ' must be a string of length 256.') str_decode = SMM('decode', 3, defaults=(None, None), + argnames=['encoding', 'errors'], doc='S.decode([encoding[,errors]]) -> object\n\nDecodes S' ' using the codec registered for encoding. encoding' ' defaults\nto the default encoding. errors may be' @@ -262,6 +263,7 @@ ' name registerd with codecs.register_error that' ' is\nable to handle UnicodeDecodeErrors.') str_encode = SMM('encode', 3, defaults=(None, None), + argnames=['encoding', 'errors'], doc='S.encode([encoding[,errors]]) -> object\n\nEncodes S' ' using the codec registered for encoding. encoding' ' defaults\nto the default encoding. errors may be' From arigo at codespeak.net Tue Sep 14 13:46:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 14 Sep 2010 13:46:33 +0200 (CEST) Subject: [pypy-svn] r77058 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100914114633.81C4B282C01@codespeak.net> Author: arigo Date: Tue Sep 14 13:46:31 2010 New Revision: 77058 Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py - copied unchanged from r77046, pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py - copied, changed from r77046, pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Removed: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Log: Rename the confusingly named "gen2" into "minimark". 
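A minimal sketch of what this rename means for callers, assuming only the module and class names visible in the diff below (the caller shown is hypothetical, not taken from the repository): imports switch from the gen2 module to minimark and from Gen2GC to MiniMarkGC, and nothing else about the call sites changes.

    # hypothetical caller, before r77058
    from pypy.rpython.memory.gc.gen2 import Gen2GC as GCClass

    # the same caller after the rename
    from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
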
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Tue Sep 14 13:46:31 2010 @@ -458,5 +458,5 @@ test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} -class TestGen2GC(DirectGCTest): - from pypy.rpython.memory.gc.gen2 import Gen2GC as GCClass +class TestMiniMarkGC(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Copied: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py (from r77046, pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py) ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Tue Sep 14 13:46:31 2010 @@ -1,6 +1,6 @@ -from pypy.rpython.memory.gc import gen2 -from pypy.rpython.memory.gc.gen2 import WORD, PAGE_NULL, PAGE_HEADER, PAGE_PTR -from pypy.rpython.memory.gc.gen2 import ARENA, ARENA_NULL +from pypy.rpython.memory.gc import minimark +from pypy.rpython.memory.gc.minimark import PAGE_NULL, PAGE_HEADER, PAGE_PTR +from pypy.rpython.memory.gc.minimark import WORD, ARENA, ARENA_NULL from pypy.rpython.lltypesystem import lltype, llmemory, llarena SHIFT = 4 @@ -9,14 +9,14 @@ def test_allocate_arena(): - a = gen2.allocate_arena(SHIFT + 8*20 + arenasize, 8) + a = minimark.allocate_arena(SHIFT + 8*20 + arenasize, 8) assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 assert a.nuninitializedpages == 20 assert a.prevarena == ARENA_NULL assert a.nextarena == ARENA_NULL # - a = gen2.allocate_arena(SHIFT + 8*20 + 7 + arenasize, 8) + a = minimark.allocate_arena(SHIFT + 8*20 + 7 + arenasize, 8) assert a.freepage == a.arena_base + SHIFT assert a.nfreepages == 20 assert a.nuninitializedpages == 20 @@ -36,7 +36,7 @@ assert llmemory.cast_ptr_to_adr(page) == page2 assert page.nextpage == PAGE_NULL # - ac = gen2.ArenaCollection(arenasize, pagesize, 99) + ac = minimark.ArenaCollection(arenasize, pagesize, 99) assert ac.arenas_start == ac.arenas_end == ARENA_NULL # page = ac.allocate_new_page(5) @@ -64,7 +64,7 @@ def arena_collection_for_test(pagesize, *pagelayouts): nb_pages = len(pagelayouts[0]) arenasize = pagesize * (nb_pages + 1) - 1 - ac = gen2.ArenaCollection(arenasize, pagesize, 9*WORD) + ac = minimark.ArenaCollection(arenasize, pagesize, 9*WORD) # def link(pageaddr, size_class, size_block, nblocks, nusedblocks): llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER)) @@ -81,7 +81,7 @@ for layout in pagelayouts: assert len(layout) == nb_pages assert " " not in layout.rstrip(" ") - a = gen2.allocate_arena(arenasize, pagesize) + a = minimark.allocate_arena(arenasize, pagesize) alist.append(a) assert lltype.typeOf(a.freepage) == llmemory.Address startpageaddr = a.freepage From afa at codespeak.net Tue Sep 14 14:12:17 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 14:12:17 +0200 (CEST) Subject: [pypy-svn] r77059 - pypy/branch/fast-forward/pypy/interpreter Message-ID: <20100914121217.6E1CB282C01@codespeak.net> Author: afa Date: Tue Sep 14 14:12:15 2010 New Revision: 77059 Modified: pypy/branch/fast-forward/pypy/interpreter/typedef.py Log: Restore builtin_method.__doc__ Modified: pypy/branch/fast-forward/pypy/interpreter/typedef.py 
============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/typedef.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/typedef.py Tue Sep 14 14:12:15 2010 @@ -843,6 +843,7 @@ '__new__': interp2app(BuiltinFunction.descr_builtinfunction__new__.im_func), '__self__': GetSetProperty(always_none, cls=BuiltinFunction), '__repr__': interp2app(BuiltinFunction.descr_function_repr), + '__doc__': getset_func_doc, }) del BuiltinFunction.typedef.rawdict['__get__'] BuiltinFunction.typedef.acceptable_as_base_class = False From arigo at codespeak.net Tue Sep 14 14:39:26 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 14 Sep 2010 14:39:26 +0200 (CEST) Subject: [pypy-svn] r77060 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100914123926.B4B2C282C01@codespeak.net> Author: arigo Date: Tue Sep 14 14:39:25 2010 New Revision: 77060 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Log: Simplify the code. Tests are not all fixed yet, because it might change a bit more. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Tue Sep 14 14:39:25 2010 @@ -13,7 +13,7 @@ # ____________________________________________________________ -class Gen2GC(MovingGCBase): +class MiniMarkGC(MovingGCBase): _alloc_flavor_ = "raw" inline_simple_malloc = True inline_simple_malloc_varsize = True @@ -58,85 +58,42 @@ # ____________________________________________________________ -# Terminology: "Arenas" are collection of "pages"; both are fixed-size. +# Terminology: the memory is subdivided into "pages". # A page contains a number of allocated objects, called "blocks". +# The actual allocation occurs in whole arenas, which are subdivided +# into pages. We don't keep track of the arenas. A page can be: +# +# - uninitialized: never touched so far. +# +# - allocated: contains some objects (all of the same size). Starts with a +# PAGE_HEADER. The page is on the chained list of pages that still have +# room for objects of that size, unless it is completely full. +# +# - free: used to be partially full, and is now free again. The page is +# on the chained list of free pages. -ARENA_PTR = lltype.Ptr(lltype.ForwardReference()) -ARENA = lltype.Struct('Arena', - ('arena_base', llmemory.Address), # see allocate_arena() for a description - ('freepage', llmemory.Address), - ('nfreepages', lltype.Signed), - ('nuninitializedpages', lltype.Signed), - ('nextarena', ARENA_PTR), - ('prevarena', ARENA_PTR), - ('arena_index', lltype.Signed), - ) -ARENA_PTR.TO.become(ARENA) -ARENA_NULL = lltype.nullptr(ARENA) - - -# Each initialized page in the arena starts with a PAGE_HEADER. The -# arena typically also contains uninitialized pages at the end. -# Similarily, each page contains blocks of a given size, which can be -# either allocated or freed, and a number of free blocks at the end of -# the page are uninitialized. The free but initialized blocks contain a -# pointer to the next free block, forming a chained list. +# Similarily, each allocated page contains blocks of a given size, which can +# be either uninitialized, allocated or free. 
PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) PAGE_HEADER = lltype.Struct('PageHeader', - ('nextpage', PAGE_PTR), # chained list of pages with the same size class - ('prevpage', PAGE_PTR), # "", but not initialized for the head of list! - ('nfree', lltype.Signed), # number of free blocks in this page - ('nuninitialized', lltype.Signed), # num. uninitialized blocks (<= nfree) - ('freeblock', llmemory.Address), # first free block, chained list - ('arena_index', lltype.Signed), # index of the arena in 'all_arenas' + # -- The following two pointers make a chained list of pages with the same + # size class. Warning, 'prevpage' contains random garbage for the first + # entry in the list. + ('nextpage', PAGE_PTR), + ('prevpage', PAGE_PTR), + # -- The number of free blocks, and the number of uninitialized blocks. + # The number of allocated blocks is the rest. + ('nuninitialized', lltype.Signed), + ('nfree', lltype.Signed), + # -- The chained list of free blocks. If there are none, points to the + # first uninitialized block. + ('freeblock', llmemory.Address), ) PAGE_PTR.TO.become(PAGE_HEADER) PAGE_NULL = lltype.nullptr(PAGE_HEADER) - -def allocate_arena(arena_size, page_size): - # 'arena_base' points to the start of malloced memory; it might not - # be a page-aligned address - arena_base = llarena.arena_malloc(arena_size, False) - if not arena_base: - raise MemoryError("couldn't allocate the next arena") - # - # 'freepage' points to the first unused page - freepage = start_of_page(arena_base + page_size - 1, page_size) - # - # we stick the ARENA structure either at the start or at the end - # of the big arena, depending on alignment of the malloc'ed memory - arena_end = arena_base + arena_size - struct_size = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA)) - if freepage - arena_base >= struct_size: - arena_addr = arena_base - else: - arena_end -= struct_size - arena_addr = arena_end - # - llarena.arena_reserve(arena_addr, llmemory.sizeof(ARENA), False) - arena = llmemory.cast_adr_to_ptr(arena_addr, ARENA_PTR) - # - arena.arena_base = arena_base - arena.freepage = freepage - # 'nfreepages' is the number of unused pages - arena.nfreepages = (arena_end - freepage) // page_size - arena.nuninitializedpages = arena.nfreepages - # - # The arenas containing at least one free page are linked in a - # doubly-linked list. We keep this chained list in order: it - # starts with the arenas with the most number of allocated - # pages, so that the least allocated arenas near the end of the - # list have a chance to become completely empty and be freed. 
- arena.nextarena = ARENA_NULL - arena.prevarena = ARENA_NULL - return arena - -def free_arena(arena): - llarena.arena_free(arena.arena_base) - # ____________________________________________________________ @@ -161,13 +118,9 @@ for i in range(1, length): self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i) # - self.arenas_start = ARENA_NULL #the most allocated (but not full) arena - self.arenas_end = ARENA_NULL #the least allocated(but not empty)arena - # - self.all_arenas = lltype.malloc(rffi.CArray(ARENA_PTR), 0, - flavor='raw') - self.all_arenas_size = 0 - self.all_arenas_next = 0 + self.uninitialized_pages = PAGE_NULL + self.num_uninitialized_pages = 0 + self.free_pages = PAGE_NULL def malloc(self, size): @@ -183,128 +136,90 @@ page = self.allocate_new_page(size_class) # # The result is simply 'page.freeblock' - ll_assert(page.nfree > 0, "page_for_size lists a page with nfree <= 0") result = page.freeblock - page.nfree -= 1 - if page.nfree == 0: + if page.nfree > 0: # - # This was the last free block, so unlink the page from the - # chained list. - self.page_for_size[size_class] = page.nextpage + # The 'result' was part of the chained list; read the next. + page.nfree -= 1 + page.freeblock = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + False) # else: - # This was not the last free block, so update 'page.freeblock' - # to point to the next free block. Two cases here... - if page.nfree < page.nuninitialized: - # The 'result' was not initialized at all. We must compute - # the next free block by adding 'size' to 'page.freeblock'. - page.freeblock = result + size - page.nuninitialized -= 1 - ll_assert(page.nfree == page.nuninitialized, - "bad value of page.nuninitialized") - else: - # The 'result' was part of the chained list; read the next. - page.freeblock = result.address[0] - llarena.arena_reset(result, - llmemory.sizeof(llmemory.Address), - False) + # The 'result' is part of the uninitialized blocks. + ll_assert(page.nuninitialized > 0, + "fully allocated page found in the page_for_size list") + page.freeblock = result + size + page.nuninitialized -= 1 + if page.nuninitialized == 0: + # + # This was the last free block, so unlink the page from the + # chained list. + self.page_for_size[size_class] = page.nextpage # llarena.arena_reserve(result, _dummy_size(size), False) return result def allocate_new_page(self, size_class): - """Allocate a new page for the given size_class.""" + """Allocate and return a new page for the given size_class.""" # - # Get the arena with the highest number of pages already allocated - arena = self.arenas_start - if arena == ARENA_NULL: - # No arenas. Get a fresh new arena. - ll_assert(self.arenas_end == ARENA_NULL, - "!arenas_start && arenas_end") - arena = self.allocate_new_arena() - # - # Get the page from there (same logic as in malloc() except on - # pages instead of on blocks) - result = arena.freepage - arena.nfreepages -= 1 - if arena.nfreepages == 0: - # - # This was the last free page, so unlink the arena from the - # chained list. - self.arenas_start = arena.nextarena - if self.arenas_start == ARENA_NULL: - self.arenas_end = ARENA_NULL - else: - self.arenas_start.prevarena = ARENA_NULL + if self.free_pages != PAGE_NULL: # + # Get the page from the chained list 'free_pages'. 
+ page = self.free_pages + self.free_pages = page.address[0] + llarena.arena_reset(self.free_pages, + llmemory.sizeof(llmemory.Address), + False) else: - # This was not the last free page, so update 'arena.freepage' - # to point to the next free page. Two cases here... - if arena.nfreepages < arena.nuninitializedpages: - # The 'result' was not initialized at all. We must compute - # the next free page by adding 'page_size' to 'arena.freepage'. - arena.freepage = result + self.page_size - arena.nuninitializedpages -= 1 - ll_assert(arena.nfreepages == arena.nuninitializedpages, - "bad value of arena.nuninitializedpages") - else: - # The 'result' was part of the chained list; read the next. - arena.freepage = result.address[0] - llarena.arena_reset(result, - llmemory.sizeof(llmemory.Address), - False) + # Get the next free page from the uninitialized pages. + if self.num_uninitialized_pages == 0: + self.allocate_new_arena() # Out of memory. Get a new arena. + page = self.uninitialized_pages + self.uninitialized_pages += self.page_size + self.num_uninitialized_pages -= 1 # # Initialize the fields of the resulting page - llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER)) - page = llmemory.cast_adr_to_ptr(result, PAGE_PTR) + llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) + result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) # hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - page.nfree = self.nblocks_for_size[size_class] - page.nuninitialized = page.nfree - page.freeblock = result + hdrsize - page.nextpage = PAGE_NULL + result.nuninitialized = self.nblocks_for_size[size_class] + result.nfree = 0 + result.freeblock = page + hdrsize + result.nextpage = PAGE_NULL ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") - self.page_for_size[size_class] = page - return page + self.page_for_size[size_class] = result + return result def allocate_new_arena(self): - arena = allocate_arena(self.arena_size, self.page_size) - self.arenas_start = arena - self.arenas_end = arena - # - # Search the next free entry in the 'all_arenas' array - i = self.all_arenas_next - size = self.all_arenas_size - count = size - while count > 0: - if self.all_arenas[i] == ARENA_NULL: - break # 'i' is the free entry - count -= 1 - i += 1 - if i == size: - i = 0 - else: - # - # No more free entry. Resize the array to get some space. 
- newsize = (size + 3) * 2 - copy = lltype.malloc(rffi.CArray(ARENA_PTR), newsize, - flavor='raw', zero=True) - i = 0 - while i < size: - copy[i] = self.all_arenas[i] - i += 1 - # 'i' is equal to the old 'size', so it's now a free entry - lltype.free(self.all_arenas, flavor='raw') - self.all_arenas = copy - self.all_arenas_size = newsize - # - self.all_arenas_next = i - arena.arena_index = i - self.all_arenas[i] = arena - return arena + ll_assert(self.num_uninitialized_pages == 0, + "some uninitialized pages are already waiting") + # + # 'arena_base' points to the start of malloced memory; it might not + # be a page-aligned address + arena_base = llarena.arena_malloc(self.arena_size, False) + if not arena_base: + raise MemoryError("couldn't allocate the next arena") + arena_end = arena_base + self.arena_size + # + # 'firstpage' points to the first unused page + firstpage = start_of_page(arena_base + self.page_size - 1, + self.page_size) + # 'npages' is the number of full pages just allocated + npages = (arena_end - firstpage) // self.page_size + # + # add these pages to the list + self.uninitialized_pages = firstpage + self.num_uninitialized_pages = npages + # + # increase a bit arena_size for the next time + self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1) + self.arena_size = (self.arena_size // self.page_size) * self.page_size def free(self, obj, size): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Tue Sep 14 14:39:25 2010 @@ -1,27 +1,26 @@ +import py from pypy.rpython.memory.gc import minimark from pypy.rpython.memory.gc.minimark import PAGE_NULL, PAGE_HEADER, PAGE_PTR -from pypy.rpython.memory.gc.minimark import WORD, ARENA, ARENA_NULL +from pypy.rpython.memory.gc.minimark import WORD from pypy.rpython.lltypesystem import lltype, llmemory, llarena +from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr SHIFT = 4 hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) -arenasize = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA)) def test_allocate_arena(): - a = minimark.allocate_arena(SHIFT + 8*20 + arenasize, 8) - assert a.freepage == a.arena_base + SHIFT - assert a.nfreepages == 20 - assert a.nuninitializedpages == 20 - assert a.prevarena == ARENA_NULL - assert a.nextarena == ARENA_NULL - # - a = minimark.allocate_arena(SHIFT + 8*20 + 7 + arenasize, 8) - assert a.freepage == a.arena_base + SHIFT - assert a.nfreepages == 20 - assert a.nuninitializedpages == 20 - assert a.prevarena == ARENA_NULL - assert a.nextarena == ARENA_NULL + ac = minimark.ArenaCollection(SHIFT + 8*20, 8, 1) + ac.allocate_new_arena() + assert ac.num_uninitialized_pages == 20 + ac.uninitialized_pages + 8*20 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1") + # + ac = minimark.ArenaCollection(SHIFT + 8*20 + 7, 8, 1) + ac.allocate_new_arena() + assert ac.num_uninitialized_pages == 20 + ac.uninitialized_pages + 8*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 8") def test_allocate_new_page(): @@ -30,34 +29,30 @@ # def checknewpage(page, size_class): size = WORD * size_class - assert page.nfree == (pagesize - hdrsize) // size - assert page.nuninitialized == page.nfree - page2 = page.freeblock - hdrsize - assert llmemory.cast_ptr_to_adr(page) 
== page2 + assert page.nuninitialized == (pagesize - hdrsize) // size + assert page.nfree == 0 + page1 = page.freeblock - hdrsize + assert llmemory.cast_ptr_to_adr(page) == page1 assert page.nextpage == PAGE_NULL # ac = minimark.ArenaCollection(arenasize, pagesize, 99) - assert ac.arenas_start == ac.arenas_end == ARENA_NULL + assert ac.num_uninitialized_pages == 0 # page = ac.allocate_new_page(5) checknewpage(page, 5) - a = ac.arenas_start - assert a != ARENA_NULL - assert a == ac.arenas_end - assert a.nfreepages == 2 - assert a.freepage == a.arena_base + SHIFT + pagesize + assert ac.num_uninitialized_pages == 2 + assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[5] == page # page = ac.allocate_new_page(3) checknewpage(page, 3) - assert a == ac.arenas_start == ac.arenas_end - assert a.nfreepages == 1 - assert a.freepage == a.arena_base + SHIFT + 2*pagesize + assert ac.num_uninitialized_pages == 1 + assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[3] == page # page = ac.allocate_new_page(4) checknewpage(page, 4) - assert ac.arenas_start == ac.arenas_end == ARENA_NULL # has been unlinked + assert ac.num_uninitialized_pages == 0 assert ac.page_for_size[4] == page @@ -139,12 +134,9 @@ def test_simple_arena_collection(): - # Test supposing that we have two partially-used arenas pagesize = hdrsize + 16 - ac = arena_collection_for_test(pagesize, - "##.. ", - ".# ") - assert ac.arenas_start.nfreepages == 3 + ac = arena_collection_for_test(pagesize, "##....# ") + #assert ac.... assert ac.arenas_end.nfreepages == 4 # a0 = getarena(ac, 0, total=2) From cfbolz at codespeak.net Tue Sep 14 15:17:32 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 15:17:32 +0200 (CEST) Subject: [pypy-svn] r77061 - in pypy/branch/better-map-instances/pypy: interpreter objspace/std Message-ID: <20100914131732.4BBB7282C01@codespeak.net> Author: cfbolz Date: Tue Sep 14 15:17:30 2010 New Revision: 77061 Modified: pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: hide the storage implementation behind an interface, to make it more clever soon Modified: pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py Tue Sep 14 15:17:30 2010 @@ -169,10 +169,14 @@ # hooks that the mapdict implementations needs: def _get_mapdict_map(self): return None - def _get_mapdict_storage(self): - return None def _set_mapdict_map(self, map): raise NotImplementedError + def _mapdict_read_storage(self, index): + raise NotImplementedError + def _mapdict_write_storage(self, index, value): + raise NotImplementedError + def _mapdict_storage_length(self): + raise NotImplementedError def _set_mapdict_storage(self, storage): raise NotImplementedError Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Tue Sep 14 15:17:30 2010 @@ -69,14 +69,14 @@ oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): oldattr._size_estimate += attr.size_estimate() - oldattr.size_estimate() - if 
attr.length() > len(obj._get_mapdict_storage()): + if attr.length() > obj._mapdict_storage_length(): # note that attr.size_estimate() is always at least attr.length() new_storage = [None] * attr.size_estimate() - for i in range(len(obj._get_mapdict_storage())): - new_storage[i] = obj._get_mapdict_storage()[i] + for i in range(obj._mapdict_storage_length()): + new_storage[i] = obj._mapdict_read_storage(i) obj._set_mapdict_storage(new_storage) - obj._get_mapdict_storage()[attr.position] = w_value + obj._mapdict_write_storage(attr.position, w_value) obj._set_mapdict_map(attr) def materialize_r_dict(self, space, obj, w_d): @@ -85,6 +85,9 @@ def remove_dict_entries(self, obj): raise NotImplementedError("abstract base class") + def __repr__(self): + return "<%s w_cls=%s>" % (self.__class__.__name__, self.w_cls) + class Terminator(AbstractAttribute): @@ -189,12 +192,12 @@ def read(self, obj, selector): if selector == self.selector: - return obj._get_mapdict_storage()[self.position] + return obj._mapdict_read_storage(self.position) return self.back.read(obj, selector) def write(self, obj, selector, w_value): if selector == self.selector: - obj._get_mapdict_storage()[self.position] = w_value + obj._mapdict_write_storage(self.position, w_value) return True return self.back.write(obj, selector, w_value) @@ -237,7 +240,7 @@ new_obj = self.back.materialize_r_dict(space, obj, w_d) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - w_d.r_dict_content[w_attr] = obj._get_mapdict_storage()[self.position] + w_d.r_dict_content[w_attr] = obj._mapdict_read_storage(self.position) else: self._copy_attr(obj, new_obj) return new_obj @@ -248,6 +251,9 @@ self._copy_attr(obj, new_obj) return new_obj + def __repr__(self): + return "" % (self.selector, self.position, self.back) + def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons @@ -273,10 +279,14 @@ def _get_mapdict_map(self): return jit.hint(self.map, promote=True) - def _get_mapdict_storage(self): - return self.storage def _set_mapdict_map(self, map): self.map = map + def _mapdict_read_storage(self, index): + return self.storage[index] + def _mapdict_write_storage(self, index, value): + self.storage[index] = value + def _mapdict_storage_length(self): + return len(self.storage) def _set_mapdict_storage(self, storage): self.storage = storage @@ -486,13 +496,15 @@ # everything matches, it's incredibly fast if pycode.space.config.objspace.std.withmethodcachecounter: entry.success_counter += 1 - return w_obj._get_mapdict_storage()[entry.index] + return w_obj._mapdict_read_storage(entry.index) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): space = pycode.space w_name = pycode.co_names_w[nameindex] + if space.str_w(w_name) == "task_holding": + print map if map is not None: w_type = map.w_cls w_descr = w_type.getattribute_if_not_from_object() @@ -523,7 +535,7 @@ entry.index = index if space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 - return w_obj._get_mapdict_storage()[index] + return w_obj._mapdict_read_storage(index) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) From cfbolz at codespeak.net Tue Sep 14 15:22:30 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 15:22:30 +0200 (CEST) Subject: [pypy-svn] r77062 - in 
pypy/branch/better-map-instances/pypy/rlib: . test Message-ID: <20100914132230.05B74282C01@codespeak.net> Author: cfbolz Date: Tue Sep 14 15:22:28 2010 New Revision: 77062 Added: pypy/branch/better-map-instances/pypy/rlib/rerased.py (props changed) - copied unchanged from r77061, pypy/branch/reduce-instance-size-experiments/pypy/rlib/rerased.py pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py (props changed) - copied unchanged from r77061, pypy/branch/reduce-instance-size-experiments/pypy/rlib/test/test_rerased.py Log: put the usual bits from the reduce-instance-size-experiments branch here From cfbolz at codespeak.net Tue Sep 14 15:22:55 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 15:22:55 +0200 (CEST) Subject: [pypy-svn] r77063 - pypy/branch/reduce-instance-size-experiments Message-ID: <20100914132255.AD940282C01@codespeak.net> Author: cfbolz Date: Tue Sep 14 15:22:54 2010 New Revision: 77063 Removed: pypy/branch/reduce-instance-size-experiments/ Log: remove the branch, now that the useful things (explicit type erasure) are moved to better-map-instances From cfbolz at codespeak.net Tue Sep 14 15:57:39 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 15:57:39 +0200 (CEST) Subject: [pypy-svn] r77064 - in pypy/branch/better-map-instances/pypy/rlib: . test Message-ID: <20100914135739.B80B6282C0C@codespeak.net> Author: cfbolz Date: Tue Sep 14 15:57:38 2010 New Revision: 77064 Modified: pypy/branch/better-map-instances/pypy/rlib/rerased.py pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Log: Switch to using GCREF instead of OBJECTPTR, to also support lists and strings soon. Modified: pypy/branch/better-map-instances/pypy/rlib/rerased.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rlib/rerased.py (original) +++ pypy/branch/better-map-instances/pypy/rlib/rerased.py Tue Sep 14 15:57:38 2010 @@ -10,7 +10,7 @@ from pypy.rpython.rmodel import Repr from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem.rclass import OBJECTPTR -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.error import TyperError @@ -72,7 +72,7 @@ c_one = hop.inputconst(lltype.Signed, 1) vi = hop.genop('cast_ptr_to_int', [v], resulttype=lltype.Signed) return hop.genop('int_rshift', [vi, c_one], resulttype=lltype.Signed) - return hop.genop('cast_pointer', [v], resulttype = hop.r_result) + return hop.genop('cast_opaque_ptr', [v], resulttype = hop.r_result) class Entry(ExtRegistryEntry): _about_ = is_integer @@ -119,7 +119,7 @@ class ErasedRepr(Repr): - lowleveltype = OBJECTPTR + lowleveltype = llmemory.GCREF def __init__(self, rtyper): self.rtyper = rtyper @@ -129,7 +129,9 @@ if (isinstance(s_arg, annmodel.SomeInstance) or (s_arg.is_constant() and s_arg.const is None)): hop.exception_cannot_occur() - [v] = hop.inputargs(r_generic_object) # might generate a cast_pointer + [v_instance] = hop.inputargs(r_generic_object) # might generate a cast_pointer + v = hop.genop('cast_opaque_ptr', [v_instance], + resulttype=self.lowleveltype) return v else: assert isinstance(s_arg, annmodel.SomeInteger) @@ -140,9 +142,11 @@ resulttype = lltype.Signed) v2p1 = hop.genop('int_add', [v2, c_one], resulttype = lltype.Signed) - v_instance = hop.genop('cast_int_to_ptr', [v2p1], - resulttype = self.lowleveltype) - return v_instance + v_instance = hop.genop('cast_int_to_ptr', 
[v2p1], + resulttype=self.lowleveltype) + v = hop.genop('cast_opaque_ptr', [v_instance], + resulttype=self.lowleveltype) + return v def convert_const(self, value): @@ -151,5 +155,5 @@ else: r_generic_object = getinstancerepr(self.rtyper, None) v = r_generic_object.convert_const(value._x) - return v + return lltype.cast_opaque_ptr(self.lowleveltype, v) Modified: pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py (original) +++ pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Tue Sep 14 15:57:38 2010 @@ -5,7 +5,7 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory class X(object): @@ -70,7 +70,7 @@ def f(): return erase(X()) x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR + assert lltype.typeOf(x) == llmemory.GCREF def test_rtype_2(): def f(): From cfbolz at codespeak.net Tue Sep 14 15:59:07 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 15:59:07 +0200 (CEST) Subject: [pypy-svn] r77065 - pypy/branch/better-map-instances/pypy/rlib/test Message-ID: <20100914135907.B4164282C0C@codespeak.net> Author: cfbolz Date: Tue Sep 14 15:59:06 2010 New Revision: 77065 Modified: pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Log: improve test Modified: pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py (original) +++ pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Tue Sep 14 15:59:06 2010 @@ -35,7 +35,9 @@ def test_simple_int_overflow(): py.test.raises(OverflowError, erase, sys.maxint) + py.test.raises(OverflowError, erase, sys.maxint-1) py.test.raises(OverflowError, erase, -sys.maxint) + py.test.raises(OverflowError, erase, -sys.maxint-1) def test_annotate_1(): def f(): From afa at codespeak.net Tue Sep 14 16:16:51 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 16:16:51 +0200 (CEST) Subject: [pypy-svn] r77066 - in pypy/branch/fast-forward/pypy/module/itertools: . 
test Message-ID: <20100914141651.AFC1F282C0C@codespeak.net> Author: afa Date: Tue Sep 14 16:16:50 2010 New Revision: 77066 Modified: pypy/branch/fast-forward/pypy/module/itertools/__init__.py pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py Log: Implement itertools.izip_longest() Modified: pypy/branch/fast-forward/pypy/module/itertools/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/__init__.py Tue Sep 14 16:16:50 2010 @@ -35,6 +35,7 @@ 'imap' : 'interp_itertools.W_IMap', 'islice' : 'interp_itertools.W_ISlice', 'izip' : 'interp_itertools.W_IZip', + 'izip_longest' : 'interp_itertools.W_IZipLongest', 'repeat' : 'interp_itertools.W_Repeat', 'starmap' : 'interp_itertools.W_StarMap', 'takewhile' : 'interp_itertools.W_TakeWhile', Modified: pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py Tue Sep 14 16:16:50 2010 @@ -1,7 +1,8 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from pypy.interpreter.gateway import interp2app, ObjSpace, W_Root +from pypy.interpreter.gateway import interp2app, ObjSpace, W_Root, unwrap_spec +from pypy.interpreter.argument import Arguments from pypy.rlib.rarithmetic import ovfcheck class W_Count(Wrappable): @@ -545,6 +546,67 @@ """) +class W_IZipLongest(W_IMap): + _error_name = "izip_longest" + + def next_w(self): + space = self.space + nb = len(self.iterators_w) + + if nb == 0: + raise OperationError(space.w_StopIteration, space.w_None) + + objects_w = [None] * nb + for index in range(nb): + w_value = self.w_fillvalue + w_it = self.iterators_w[index] + if w_it is not None: + try: + w_value = space.next(w_it) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + + self.active -= 1 + if self.active == 0: + # It was the last active iterator + raise + self.iterators_w[index] = None + + objects_w[index] = w_value + return space.newtuple(objects_w) + + at unwrap_spec(ObjSpace, W_Root, Arguments) +def W_IZipLongest___new__(space, w_subtype, __args__): + kwds = __args__.keywords + w_fillvalue = space.w_None + if kwds: + if kwds[0] == "fillvalue" and len(kwds) == 1: + w_fillvalue = __args__.keywords_w[0] + else: + raise OperationError(space.w_TypeError, space.wrap( + "izip_longest() got unexpected keyword argument")) + + self = W_IZipLongest(space, space.w_None, __args__.arguments_w) + self.w_fillvalue = w_fillvalue + self.active = len(self.iterators_w) + + return space.wrap(self) + +W_IZipLongest.typedef = TypeDef( + 'izip_longest', + __new__ = interp2app(W_IZipLongest___new__), + __iter__ = interp2app(W_IZipLongest.iter_w, unwrap_spec=['self']), + next = interp2app(W_IZipLongest.next_w, unwrap_spec=['self']), + __doc__ = """Return an izip_longest object whose .next() method returns a tuple where + the i-th element comes from the i-th iterable argument. The .next() + method continues until the longest iterable in the argument sequence + is exhausted and then it raises StopIteration. 
When the shorter iterables + are exhausted, the fillvalue is substituted in their place. The fillvalue + defaults to None or can be specified by a keyword argument. + """) + + class W_Cycle(Wrappable): def __init__(self, space, w_iterable): Modified: pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py Tue Sep 14 16:16:50 2010 @@ -626,3 +626,85 @@ a, b = itertools.tee(iter('abc')) ref = weakref.ref(b) assert ref() is b + + def test_iziplongest(self): + from itertools import izip_longest, islice, count + for args in [ + ['abc', range(6)], + [range(6), 'abc'], + [range(100), range(200,210), range(300,305)], + [range(100), range(0), range(300,305), range(120), range(150)], + [range(100), range(0), range(300,305), range(120), range(0)], + ]: + # target = map(None, *args) <- this raises a py3k warning + # this is the replacement: + target = [tuple([arg[i] if i < len(arg) else None for arg in args]) + for i in range(max(map(len, args)))] + assert list(izip_longest(*args)) == target + assert list(izip_longest(*args, **{})) == target + + # Replace None fills with 'X' + target = [tuple((e is None and 'X' or e) for e in t) for t in target] + assert list(izip_longest(*args, **dict(fillvalue='X'))) == target + + # take 3 from infinite input + assert (list(islice(izip_longest('abcdef', count()),3)) == + zip('abcdef', range(3))) + + assert list(izip_longest()) == zip() + assert list(izip_longest([])) == zip([]) + assert list(izip_longest('abcdef')) == zip('abcdef') + + assert (list(izip_longest('abc', 'defg', **{})) == + zip(list('abc') + [None], 'defg')) # empty keyword dict + raises(TypeError, izip_longest, 3) + raises(TypeError, izip_longest, range(3), 3) + + for stmt in [ + "izip_longest('abc', fv=1)", + "izip_longest('abc', fillvalue=1, bogus_keyword=None)", + ]: + try: + eval(stmt, globals(), locals()) + except TypeError: + pass + else: + self.fail('Did not raise Type in: ' + stmt) + + def test_izip_longest2(self): + import itertools + class Repeater(object): + # this class is similar to itertools.repeat + def __init__(self, o, t, e): + self.o = o + self.t = int(t) + self.e = e + def __iter__(self): # its iterator is itself + return self + def next(self): + if self.t > 0: + self.t -= 1 + return self.o + else: + raise self.e + + # Formerly this code in would fail in debug mode + # with Undetected Error and Stop Iteration + r1 = Repeater(1, 3, StopIteration) + r2 = Repeater(2, 4, StopIteration) + def run(r1, r2): + result = [] + for i, j in itertools.izip_longest(r1, r2, fillvalue=0): + result.append((i, j)) + return result + assert run(r1, r2) == [(1,2), (1,2), (1,2), (0,2)] + + # Formerly, the RuntimeError would be lost + # and StopIteration would stop as expected + r1 = Repeater(1, 3, RuntimeError) + r2 = Repeater(2, 4, StopIteration) + it = itertools.izip_longest(r1, r2, fillvalue=0) + assert it.next() == (1, 2) + assert it.next() == (1, 2) + assert it.next()== (1, 2) + raises(RuntimeError, it.next) From afa at codespeak.net Tue Sep 14 16:34:20 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 16:34:20 +0200 (CEST) Subject: [pypy-svn] r77067 - in pypy/branch/fast-forward: . 
pypy/jit/backend pypy/jit/backend/llgraph pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/jit/tool pypy/module/_socket pypy/module/_socket/test pypy/module/_ssl pypy/module/_weakref pypy/module/_winreg pypy/module/array pypy/module/array/benchmark pypy/module/array/test pypy/module/pypyjit pypy/module/pypyjit/test pypy/rlib pypy/rpython/lltypesystem pypy/rpython/module pypy/rpython/tool pypy/rpython/tool/test pypy/tool/release pypy/translator/c Message-ID: <20100914143420.EAEBB282C0C@codespeak.net> Author: afa Date: Tue Sep 14 16:34:16 2010 New Revision: 77067 Added: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/ - copied from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intutils.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/intutils.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_intbound.py - copied unchanged from r77066, pypy/trunk/pypy/jit/metainterp/test/test_intbound.py pypy/branch/fast-forward/pypy/module/pypyjit/test/randomized.py - copied unchanged from r77066, pypy/trunk/pypy/module/pypyjit/test/randomized.py Removed: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt.py Modified: pypy/branch/fast-forward/ (props changed) pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py pypy/branch/fast-forward/pypy/jit/backend/model.py pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py pypy/branch/fast-forward/pypy/jit/metainterp/compile.py pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_compile.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py pypy/branch/fast-forward/pypy/module/_socket/interp_func.py pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py 
pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py pypy/branch/fast-forward/pypy/module/_weakref/interp__weakref.py pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py pypy/branch/fast-forward/pypy/module/array/benchmark/Makefile (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/intimg.c (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/loop.c (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/sum.c (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c (props changed) pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.py (props changed) pypy/branch/fast-forward/pypy/module/array/interp_array.py pypy/branch/fast-forward/pypy/module/array/test/test_array.py pypy/branch/fast-forward/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py pypy/branch/fast-forward/pypy/module/pypyjit/policy.py pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py pypy/branch/fast-forward/pypy/rlib/jit.py pypy/branch/fast-forward/pypy/rlib/rmmap.py pypy/branch/fast-forward/pypy/rlib/rsocket.py pypy/branch/fast-forward/pypy/rlib/rwin32.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py pypy/branch/fast-forward/pypy/rpython/module/ll_os.py pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py pypy/branch/fast-forward/pypy/rpython/module/ll_win32file.py pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py pypy/branch/fast-forward/pypy/rpython/tool/test/test_rffi_platform.py pypy/branch/fast-forward/pypy/tool/release/force-builds.py pypy/branch/fast-forward/pypy/translator/c/database.py Log: Merge from trunk --This line, and those below, will be ignored-- _M . 
M pypy/module/_weakref/interp__weakref.py M pypy/module/_socket/test/test_sock_app.py M pypy/module/_socket/interp_socket.py M pypy/module/_socket/interp_func.py M pypy/module/_winreg/interp_winreg.py M pypy/module/_ssl/interp_ssl.py A + pypy/module/pypyjit/test/randomized.py M pypy/module/pypyjit/test/test_pypy_c.py M pypy/module/pypyjit/policy.py M pypy/module/pypyjit/interp_jit.py _M pypy/module/array/test/test_array_old.py M pypy/module/array/test/test_array.py M pypy/module/array/interp_array.py _M pypy/module/array/benchmark/sumtst.c _M pypy/module/array/benchmark/intimgtst.c _M pypy/module/array/benchmark/sum.c _M pypy/module/array/benchmark/sumtst.py _M pypy/module/array/benchmark/intimg.c _M pypy/module/array/benchmark/intimgtst.py _M pypy/module/array/benchmark/loop.c _M pypy/module/array/benchmark/Makefile M pypy/jit/backend/test/runner_test.py M pypy/jit/backend/x86/regalloc.py M pypy/jit/backend/x86/runner.py M pypy/jit/backend/x86/assembler.py M pypy/jit/backend/model.py M pypy/jit/backend/llgraph/llimpl.py M pypy/jit/backend/llgraph/runner.py M pypy/jit/metainterp/compile.py M pypy/jit/metainterp/test/test_recursive.py M pypy/jit/metainterp/test/test_basic.py M pypy/jit/metainterp/test/test_resume.py M pypy/jit/metainterp/test/test_warmstate.py A + pypy/jit/metainterp/test/test_intbound.py M pypy/jit/metainterp/test/test_optimizeopt.py M pypy/jit/metainterp/test/test_compile.py M pypy/jit/metainterp/pyjitpl.py A + pypy/jit/metainterp/optimizeopt A + pypy/jit/metainterp/optimizeopt/intutils.py A + pypy/jit/metainterp/optimizeopt/heap.py A + pypy/jit/metainterp/optimizeopt/virtualize.py A + pypy/jit/metainterp/optimizeopt/__init__.py A + pypy/jit/metainterp/optimizeopt/rewrite.py A + pypy/jit/metainterp/optimizeopt/optimizer.py A + pypy/jit/metainterp/optimizeopt/intbounds.py M pypy/jit/metainterp/jitdriver.py M pypy/jit/metainterp/warmstate.py M pypy/jit/metainterp/warmspot.py D pypy/jit/metainterp/optimizeopt.py M pypy/jit/tool/traceviewer.py M pypy/rlib/rsocket.py M pypy/rlib/rmmap.py M pypy/rlib/rwin32.py M pypy/rlib/jit.py M pypy/rlib/_rsocket_rffi.py M pypy/translator/c/database.py M pypy/rpython/tool/test/test_rffi_platform.py M pypy/rpython/tool/rffi_platform.py M pypy/rpython/lltypesystem/rffi.py M pypy/rpython/module/ll_os.py M pypy/rpython/module/ll_win32file.py M pypy/rpython/module/ll_os_stat.py M pypy/tool/release/force-builds.py Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py Tue Sep 14 16:34:16 2010 @@ -128,7 +128,7 @@ 'getarrayitem_raw_pure' : (('ref', 'int'), 'intorptr'), 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), - 'call_assembler' : (('ref', 'varargs'), 'intorptr'), + 'call_assembler' : (('varargs',), 'intorptr'), 'cond_call_gc_wb' : (('ptr', 'ptr'), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), @@ -165,10 +165,13 @@ self.inputargs = [] self.operations = [] + def getargtypes(self): + return [v.concretetype for v in self.inputargs] + def __repr__(self): lines = [] self.as_text(lines, 1) - return 'CompiledLoop:\n%s' % '\n'.join(lines) + return 'CompiledLoop %s:\n%s' % (self.inputargs, '\n'.join(lines)) def as_text(self, lines, indent): for op in self.operations: @@ -839,6 +842,8 @@ def op_call_assembler(self, loop_token, *args): global 
_last_exception assert not self._forced + loop_token = self.cpu._redirected_call_assembler.get(loop_token, + loop_token) self._may_force = self.opindex try: inpargs = _from_opaque(loop_token._llgraph_compiled_version).inputargs @@ -861,6 +866,21 @@ vable = args[jd.index_of_virtualizable] else: vable = lltype.nullptr(llmemory.GCREF.TO) + # + # Emulate the fast path + if failindex == self.cpu.done_with_this_frame_int_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_int(0) + if failindex == self.cpu.done_with_this_frame_ref_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_ref(0) + if failindex == self.cpu.done_with_this_frame_float_v: + reset_vable(jd, vable) + return self.cpu.get_latest_value_float(0) + if failindex == self.cpu.done_with_this_frame_void_v: + reset_vable(jd, vable) + return None + # assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: return assembler_helper_ptr(failindex, vable) @@ -1480,6 +1500,17 @@ else: return 0 +def reset_vable(jd, vable): + if jd.index_of_virtualizable != -1: + fielddescr = jd.vable_token_descr + do_setfield_gc_int(vable, fielddescr.ofs, 0) + +def redirect_call_assembler(cpu, oldlooptoken, newlooptoken): + OLD = _from_opaque(oldlooptoken._llgraph_compiled_version).getargtypes() + NEW = _from_opaque(newlooptoken._llgraph_compiled_version).getargtypes() + assert OLD == NEW + cpu._redirected_call_assembler[oldlooptoken] = newlooptoken + # ____________________________________________________________ Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py Tue Sep 14 16:34:16 2010 @@ -102,6 +102,7 @@ llimpl._llinterp = LLInterpreter(self.rtyper) self._future_values = [] self._descrs = {} + self._redirected_call_assembler = {} def _freeze_(self): assert self.translate_support_code @@ -169,8 +170,8 @@ elif isinstance(x, history.ConstFloat): llimpl.compile_add_float_const(c, x.value) else: - raise Exception("%s args contain: %r" % (op.getopname(), - x)) + raise Exception("'%s' args contain: %r" % (op.getopname(), + x)) if op.is_guard(): faildescr = op.descr assert isinstance(faildescr, history.AbstractFailDescr) @@ -260,6 +261,11 @@ def clear_latest_values(self, count): llimpl.frame_clear_latest_values(self.latest_frame, count) + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + if we_are_translated(): + raise ValueError("CALL_ASSEMBLER not supported") + llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + # ---------- def sizeof(self, S): Modified: pypy/branch/fast-forward/pypy/jit/backend/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/model.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/model.py Tue Sep 14 16:34:16 2010 @@ -107,6 +107,12 @@ GUARD_NO_EXCEPTION. (Returns a GCREF)""" # XXX remove me raise NotImplementedError + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + """Redirect oldlooptoken to newlooptoken. 
More precisely, it is + enough to redirect all CALL_ASSEMBLERs already compiled that call + oldlooptoken so that from now own they will call newlooptoken.""" + raise NotImplementedError + @staticmethod def sizeof(S): raise NotImplementedError Modified: pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py Tue Sep 14 16:34:16 2010 @@ -1777,7 +1777,7 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -1816,7 +1816,7 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float - self.cpu.portal_calldescr = self.cpu.calldescrof( + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) ops = ''' @@ -1824,6 +1824,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -1846,6 +1847,20 @@ assert self.cpu.get_latest_value_float(0) == 13.5 assert called + # test the fast path, which should not call assembler_helper() + del called[:] + self.cpu.done_with_this_frame_float_v = done_number + try: + othertoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.set_future_value_float(0, 1.2) + self.cpu.set_future_value_float(1, 3.2) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 1.2 + 3.2 + assert not called + finally: + del self.cpu.done_with_this_frame_float_v + def test_raw_malloced_getarrayitem(self): ARRAY = rffi.CArray(lltype.Signed) descr = self.cpu.arraydescrof(ARRAY) @@ -1870,6 +1885,78 @@ assert a[5] == 12345 lltype.free(a, flavor='raw') + def test_redirect_call_assembler(self): + called = [] + def assembler_helper(failindex, virtualizable): + assert self.cpu.get_latest_value_float(0) == 1.25 + 3.25 + called.append(failindex) + return 13.5 + + FUNCPTR = lltype.Ptr(lltype.FuncType([lltype.Signed, llmemory.GCREF], + lltype.Float)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + ARGS = [lltype.Float, lltype.Float] + RES = lltype.Float + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + + ops = ''' + [f0, f1] + f2 = float_add(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken = LoopToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 2.35) + res = self.cpu.execute_token(looptoken) + assert self.cpu.get_latest_value_float(0) == 1.25 + 2.35 + assert not called + + ops = ''' + [f4, f5] + f3 = call_assembler(f4, f5, descr=looptoken) + guard_not_forced()[] + finish(f3) + ''' + loop = parse(ops, namespace=locals()) + othertoken = LoopToken() + 
self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + + # normal call_assembler: goes to looptoken + self.cpu.set_future_value_float(0, 1.25) + self.cpu.set_future_value_float(1, 3.25) + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + del called[:] + + # compile a replacement + ops = ''' + [f0, f1] + f2 = float_sub(f0, f1) + finish(f2)''' + loop = parse(ops) + looptoken2 = LoopToken() + looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) + + # install it + self.cpu.redirect_call_assembler(looptoken, looptoken2) + + # now, our call_assembler should go to looptoken2 + self.cpu.set_future_value_float(0, 6.0) + self.cpu.set_future_value_float(1, 1.5) # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken) + assert self.cpu.get_latest_value_float(0) == 13.5 + assert called + class OOtypeBackendTest(BaseBackendTest): Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py Tue Sep 14 16:34:16 2010 @@ -328,6 +328,7 @@ looptoken._x86_direct_bootstrap_code = self.mc.tell() self._assemble_bootstrap_direct_call(arglocs, curadr, frame_depth+param_depth) + # debug_print("Loop #", looptoken.number, "has address", looptoken._x86_loop_code, "to", self.mc.tell()) self.mc.end_function() @@ -527,7 +528,6 @@ assert isinstance(loc, StackLoc) self.mc.MOVSD_bx(loc.value, xmmtmp.value) self.mc.JMP_l(jmpadr) - return adr_stackadjust def _assemble_bootstrap_direct_call_64(self, arglocs, jmpadr, stackdepth): # XXX: Very similar to _emit_call_64 @@ -580,9 +580,23 @@ # clobber the scratch register self.mc.MOV(loc, X86_64_SCRATCH_REG) + finaljmp = self.mc.tell() self.mc.JMP(imm(jmpadr)) - return adr_stackadjust + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + # some minimal sanity checking + oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs + newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs + assert len(oldnonfloatlocs) == len(newnonfloatlocs) + assert len(oldfloatlocs) == len(newfloatlocs) + # we overwrite the instructions at the old _x86_direct_bootstrap_code + # to start with a JMP to the new _x86_direct_bootstrap_code. + # Ideally we should rather patch all existing CALLs, but well. 
+ oldadr = oldlooptoken._x86_direct_bootstrap_code + target = newlooptoken._x86_direct_bootstrap_code + mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) + mc.JMP(imm(target)) + mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): nonfloatlocs, floatlocs = arglocs Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py Tue Sep 14 16:34:16 2010 @@ -665,13 +665,11 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - portal_calldescr = self.assembler.cpu.portal_calldescr - size = portal_calldescr.get_result_size(self.translate_support_code) - # descr = op.descr assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None + size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.args[vable_index]) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py Tue Sep 14 16:34:16 2010 @@ -134,6 +134,9 @@ assert fail_index == fail_index_2 return faildescr + def redirect_call_assembler(self, oldlooptoken, newlooptoken): + self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 Modified: pypy/branch/fast-forward/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/compile.py Tue Sep 14 16:34:16 2010 @@ -14,6 +14,7 @@ from pypy.jit.metainterp.specnode import NotSpecNode, more_general_specnodes from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimizeutil import InvalidLoop +from pypy.jit.codewriter import heaptracker def giveup(): from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -57,12 +58,9 @@ loop.inputargs = history.inputargs for box in loop.inputargs: assert isinstance(box, Box) - if start > 0: - ops = history.operations[start:] - else: - ops = history.operations # make a copy, because optimize_loop can mutate the ops and descrs - loop.operations = [op.clone() for op in ops] + h_ops = history.operations + loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) @@ -550,3 +548,55 @@ descr = target_loop_token.finishdescr new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) new_loop.operations[-1] = new_op + +# ____________________________________________________________ + +class PropagateExceptionDescr(AbstractFailDescr): + def handle_fail(self, metainterp_sd, jitdriver_sd): + cpu = metainterp_sd.cpu + exception = cpu.grab_exc_value() + raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) + +propagate_exception_descr = PropagateExceptionDescr() + +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes): + """Make a LoopToken that corresponds to assembler code that just + calls back the interpreter. 
Used temporarily: a fully compiled + version of the code may end up replacing it. + """ + # 'redboxes' is only used to know the types of red arguments. + inputargs = [box.clonebox() for box in redboxes] + loop_token = make_loop_token(len(inputargs), jitdriver_sd) + # 'nb_red_args' might be smaller than len(redboxes), + # because it doesn't include the virtualizable boxes. + nb_red_args = jitdriver_sd.num_red_args + k = jitdriver_sd.portal_runner_adr + funcbox = history.ConstInt(heaptracker.adr2int(k)) + callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + # + result_type = jitdriver_sd.result_type + if result_type == history.INT: + result = BoxInt() + elif result_type == history.REF: + result = BoxPtr() + elif result_type == history.FLOAT: + result = BoxFloat() + elif result_type == history.VOID: + result = None + else: + assert 0, "bad result_type" + if result is not None: + finishargs = [result] + else: + finishargs = [] + # + jd = jitdriver_sd + faildescr = propagate_exception_descr + operations = [ + ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr), + ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), + ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) + ] + operations[1].fail_args = [] + cpu.compile_loop(inputargs, operations, loop_token) + return loop_token Modified: pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/jitdriver.py Tue Sep 14 16:34:16 2010 @@ -8,11 +8,14 @@ # self.portal_graph ... pypy.jit.metainterp.warmspot # self.portal_runner_ptr ... pypy.jit.metainterp.warmspot # self.portal_runner_adr ... pypy.jit.metainterp.warmspot + # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot + # self.num_red_args ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.warmstate ... pypy.jit.metainterp.warmspot # self.handle_jitexc_from_bh pypy.jit.metainterp.warmspot + # self.portal_finishtoken... pypy.jit.metainterp.pyjitpl # self.index ... pypy.jit.codewriter.call # self.mainjitcode ... pypy.jit.codewriter.call @@ -20,6 +23,7 @@ # self.assembler_helper_adr # self.index_of_virtualizable # self.vable_token_descr + # self.portal_calldescr # warmspot sets extra attributes starting with '_' for its own use. 
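
A minimal, self-contained sketch of the "temporary callback" mechanism described above, with invented names (Token, SimpleCPU, compile_tmp_callback_sketch) standing in for the real classes: a green key first gets a stub token that merely calls back into the interpreter, and once real assembler exists the old token is redirected to it, so CALL_ASSEMBLERs that already target the stub keep working.

    # All names here are illustrative; only the control flow mirrors the patch.
    class Token(object):
        def __init__(self, run):
            self.run = run                    # callable taking the red arguments

    class SimpleCPU(object):
        def __init__(self):
            self.redirected = {}              # old token -> new token, cf. _redirected_call_assembler
        def redirect_call_assembler(self, oldtoken, newtoken):
            self.redirected[oldtoken] = newtoken
        def execute_token(self, token, *args):
            token = self.redirected.get(token, token)
            return token.run(*args)

    def compile_tmp_callback_sketch(interpreter_fn):
        # the stub just calls back the interpreter, like compile_tmp_callback()
        return Token(interpreter_fn)

    def demo():
        cpu = SimpleCPU()
        def portal_runner(x, y):              # stands in for the portal runner
            return x + y
        tmp = compile_tmp_callback_sketch(portal_runner)
        assert cpu.execute_token(tmp, 2, 3) == 5    # slow path through the interpreter
        real = Token(lambda x, y: x + y)            # stands in for the real assembler
        cpu.redirect_call_assembler(tmp, real)
        assert cpu.execute_token(tmp, 2, 3) == 5    # now routed to the redirected token

    demo()
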
Modified: pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py Tue Sep 14 16:34:16 2010 @@ -690,25 +690,27 @@ targetjitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] allboxes = greenboxes + redboxes warmrunnerstate = targetjitdriver_sd.warmstate - token = None + assembler_call = False if warmrunnerstate.inlining: if warmrunnerstate.can_inline_callable(greenboxes): portal_code = targetjitdriver_sd.mainjitcode return self.metainterp.perform_call(portal_code, allboxes, greenkey=greenboxes) - token = warmrunnerstate.get_assembler_token(greenboxes) + assembler_call = True # verify that we have all green args, needed to make sure # that assembler that we call is still correct self.verify_green_args(targetjitdriver_sd, greenboxes) # - return self.do_recursive_call(targetjitdriver_sd, allboxes, token) + return self.do_recursive_call(targetjitdriver_sd, allboxes, + assembler_call) - def do_recursive_call(self, targetjitdriver_sd, allboxes, token=None): + def do_recursive_call(self, targetjitdriver_sd, allboxes, + assembler_call=False): portal_code = targetjitdriver_sd.mainjitcode k = targetjitdriver_sd.portal_runner_adr funcbox = ConstInt(heaptracker.adr2int(k)) - return self.do_residual_call(funcbox, portal_code.calldescr, - allboxes, assembler_call_token=token, + return self.do_residual_call(funcbox, portal_code.calldescr, allboxes, + assembler_call=assembler_call, assembler_call_jd=targetjitdriver_sd) opimpl_recursive_call_i = _opimpl_recursive_call @@ -828,8 +830,6 @@ self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc else: - warmrunnerstate = jitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenboxes) # warning! careful here. We have to return from the current # frame containing the jit_merge_point, and then use # do_recursive_call() to follow the recursive call. 
This is @@ -843,7 +843,8 @@ except ChangeFrame: pass frame = self.metainterp.framestack[-1] - frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, token) + frame.do_recursive_call(jitdriver_sd, greenboxes + redboxes, + assembler_call=True) raise ChangeFrame def debug_merge_point(self, jitdriver_sd, greenkey): @@ -1058,7 +1059,7 @@ return resbox def do_residual_call(self, funcbox, descr, argboxes, - assembler_call_token=None, + assembler_call=False, assembler_call_jd=None): # First build allboxes: it may need some reordering from the # list provided in argboxes, depending on the order in which @@ -1096,16 +1097,15 @@ if (effectinfo is None or effectinfo.extraeffect == effectinfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE or - assembler_call_token is not None): + assembler_call): # residual calls require attention to keep virtualizables in-sync self.metainterp.clear_exception() self.metainterp.vable_and_vrefs_before_residual_call() resbox = self.metainterp.execute_and_record_varargs( rop.CALL_MAY_FORCE, allboxes, descr=descr) self.metainterp.vrefs_after_residual_call() - if assembler_call_token is not None: - self.metainterp.direct_assembler_call(assembler_call_token, - assembler_call_jd) + if assembler_call: + self.metainterp.direct_assembler_call(assembler_call_jd) if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() @@ -1217,6 +1217,7 @@ history.FLOAT: 'float', history.VOID: 'void'}[jd.result_type] tokens = getattr(self, 'loop_tokens_done_with_this_frame_%s' % name) + jd.portal_finishtoken = tokens[0].finishdescr num = self.cpu.get_fail_descr_number(tokens[0].finishdescr) setattr(self.cpu, 'done_with_this_frame_%s_v' % name, num) # @@ -2103,20 +2104,24 @@ op.args = [resbox_as_const] + op.args return resbox - def direct_assembler_call(self, token, targetjitdriver_sd): + def direct_assembler_call(self, targetjitdriver_sd): """ Generate a direct call to assembler for portal entry point, patching the CALL_MAY_FORCE that occurred just now. 
""" op = self.history.operations.pop() assert op.opnum == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - args = op.args[num_green_args + 1:] + greenargs = op.args[1:num_green_args+1] + args = op.args[num_green_args+1:] + assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: index = targetjitdriver_sd.index_of_virtualizable vbox = args[index] args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) # ^^^ and not "+=", which makes 'args' a resizable list + warmrunnerstate = targetjitdriver_sd.warmstate + token = warmrunnerstate.get_assembler_token(greenargs, args) op.opnum = rop.CALL_ASSEMBLER op.args = args op.descr = token Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py Tue Sep 14 16:34:16 2010 @@ -455,6 +455,31 @@ # the CALL_PURE is constant-folded away by optimizeopt.py self.check_loops(int_sub=1, call=0, call_pure=0) + def test_pure_function_returning_object(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + class V: + def __init__(self, x): + self.x = x + v1 = V(1) + v2 = V(2) + def externfn(x): + if x: + return v1 + else: + return v2 + externfn._pure_function_ = True + def f(n, m): + while n > 0: + myjitdriver.can_enter_jit(n=n, m=m) + myjitdriver.jit_merge_point(n=n, m=m) + m = V(m).x + n -= externfn(m).x + externfn(m + m - m).x + return n + res = self.meta_interp(f, [21, 5]) + assert res == -1 + # the CALL_PURE is constant-folded away by optimizeopt.py + self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=1) + def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) class X(object): @@ -530,6 +555,32 @@ assert res == -2 self.check_loop_count(1) + def test_can_never_inline(self): + def can_never_inline(x): + return x > 50 + myjitdriver = JitDriver(greens = ['x'], reds = ['y'], + can_never_inline = can_never_inline) + @dont_look_inside + def marker(): + pass + def f(x, y): + while y >= 0: + myjitdriver.can_enter_jit(x=x, y=y) + myjitdriver.jit_merge_point(x=x, y=y) + x += 1 + if x == 4 or x == 61: + marker() + y -= x + return y + # + res = self.meta_interp(f, [3, 6], repeat=7) + assert res == 6 - 4 - 5 + self.check_history(call=0) # because the trace starts in the middle + # + res = self.meta_interp(f, [60, 84], repeat=7) + assert res == 84 - 61 - 62 + self.check_history(call=1) # because the trace starts immediately + def test_format(self): def f(n): return len("<%d>" % n) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_compile.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_compile.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_compile.py Tue Sep 14 16:34:16 2010 @@ -1,10 +1,11 @@ from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats -from pypy.jit.metainterp.history import BoxInt +from pypy.jit.metainterp.history import BoxInt, INT from pypy.jit.metainterp.specnode import NotSpecNode, ConstantSpecNode from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.compile import ResumeGuardCountersInt -from pypy.jit.metainterp import 
optimize, jitprof, typesystem +from pypy.jit.metainterp.compile import compile_tmp_callback +from pypy.jit.metainterp import optimize, jitprof, typesystem, compile from pypy.jit.metainterp.test.oparser import parse from pypy.jit.metainterp.test.test_optimizefindnode import LLtypeMixin @@ -154,3 +155,67 @@ count = rgc.see_int(192) assert count == 1 assert rgc.counters == [1, 1, 7, 6, 1] + + +def test_compile_tmp_callback(): + from pypy.jit.codewriter import heaptracker + from pypy.jit.backend.llgraph import runner + from pypy.rpython.lltypesystem import lltype, llmemory + from pypy.rpython.annlowlevel import llhelper + from pypy.rpython.llinterp import LLException + # + cpu = runner.LLtypeCPU(None) + FUNC = lltype.FuncType([lltype.Signed]*4, lltype.Signed) + def ll_portal_runner(g1, g2, r3, r4): + assert (g1, g2, r3, r4) == (12, 34, -156, -178) + if raiseme: + raise raiseme + else: + return 54321 + # + class FakeJitDriverSD: + portal_runner_ptr = llhelper(lltype.Ptr(FUNC), ll_portal_runner) + portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr) + portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + portal_finishtoken = compile.DoneWithThisFrameDescrInt() + num_red_args = 2 + result_type = INT + # + loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), + [ConstInt(12), ConstInt(34)], + [BoxInt(56), ConstInt(78), BoxInt(90)]) + # + raiseme = None + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) # passed in, but dropped + fail_descr = cpu.execute_token(loop_token) + assert fail_descr is FakeJitDriverSD().portal_finishtoken + # + EXC = lltype.GcStruct('EXC') + llexc = lltype.malloc(EXC) + raiseme = LLException("exception class", llexc) + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) + fail_descr = cpu.execute_token(loop_token) + assert isinstance(fail_descr, compile.PropagateExceptionDescr) + got = cpu.grab_exc_value() + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc + # + class FakeMetaInterpSD: + class ExitFrameWithExceptionRef(Exception): + pass + FakeMetaInterpSD.cpu = cpu + class FakeJitDriverSD: + pass + cpu.set_future_value_int(0, -156) + cpu.set_future_value_int(1, -178) + cpu.set_future_value_int(2, -190) + fail_descr = cpu.execute_token(loop_token) + try: + fail_descr.handle_fail(FakeMetaInterpSD(), FakeJitDriverSD()) + except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: + assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), e.args[1]) == llexc + else: + assert 0, "should have raised" Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 14 16:34:16 2010 @@ -4,7 +4,8 @@ #OOtypeMixin, BaseTest) from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder -from pypy.jit.metainterp import optimizeopt +import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt +import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1 from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt @@ -64,7 +65,7 @@ class cpu(object): pass opt = FakeOptimizer() - virt1 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt1 = 
virtualize.AbstractVirtualStructValue(opt, None) lst1 = virt1._get_field_descr_list() assert lst1 == [] lst2 = virt1._get_field_descr_list() @@ -75,7 +76,7 @@ lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - virt2 = optimizeopt.AbstractVirtualStructValue(opt, None) + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None)) @@ -88,7 +89,7 @@ self.fieldnums = fieldnums def equals(self, fieldnums): return self.fieldnums == fieldnums - class FakeVirtualValue(optimizeopt.AbstractVirtualValue): + class FakeVirtualValue(virtualize.AbstractVirtualValue): def _make_virtual(self, *args): return FakeVInfo() v1 = FakeVirtualValue(None, None, None) @@ -257,6 +258,7 @@ optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) + print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) def test_simple(self): @@ -266,7 +268,13 @@ guard_value(i0, 0) [i0] jump(i) """ - self.optimize_loop(ops, 'Not', ops) + expected = """ + [i] + i0 = int_sub(i, 1) + guard_value(i0, 0) [i0] + jump(1) + """ + self.optimize_loop(ops, 'Not', expected) def test_constant_propagate(self): ops = """ @@ -680,7 +688,13 @@ guard_value(i1, 0) [i] jump(i) """ - self.optimize_loop(ops, 'Not', ops) + expected = """ + [i] + i1 = int_add(i, 3) + guard_value(i1, 0) [i] + jump(-3) + """ + self.optimize_loop(ops, 'Not', expected) def test_int_is_true_of_bool(self): ops = """ @@ -3089,6 +3103,724 @@ ''' self.optimize_loop(ops, 'Not', expected) + def test_bound_lt(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_noguard(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + i2 = int_lt(i0, 5) + jump(i2) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + i2 = int_lt(i0, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_noopt(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(4) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_rev(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + i2 = int_gt(i0, 3) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_false(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_tripple(self): + ops = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 7) + guard_true(i2) [] + i3 = int_lt(i0, 5) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 0) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_before(self): + ops = """ + [i0] + i2 = int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + i1 = int_lt(i0, 6) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = 
int_add(i0, 10) + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_ovf(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_add(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_add_ovf_before(self): + ops = """ + [i0] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + i1 = int_lt(i0, 6) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i3 = int_lt(i2, 15) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_sub(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_sub(i0, 10) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lt_sub_before(self): + ops = """ + [i0] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + i1 = int_lt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + expected = """ + [i0] + i2 = int_sub(i0, 10) + i3 = int_lt(i2, -5) + guard_true(i3) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ltle(self): + ops = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + i2 = int_le(i0, 3) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_lt(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_lelt(self): + ops = """ + [i0] + i1 = int_le(i0, 4) + guard_true(i1) [] + i2 = int_lt(i0, 5) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_le(i0, 4) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gt(self): + ops = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + i2 = int_gt(i0, 4) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gtge(self): + ops = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + i2 = int_ge(i0, 6) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_gt(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_gegt(self): + ops = """ + [i0] + i1 = int_ge(i0, 5) + guard_true(i1) [] + i2 = int_gt(i0, 4) + guard_true(i2) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_ge(i0, 5) + guard_true(i1) [] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ovf(self): + ops = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i3 = int_add_ovf(i0, 1) + guard_no_overflow() [] + jump(i3) + """ + expected = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i3 = int_add(i0, 1) + jump(i3) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_addsub_const(self): + ops = """ + [i0] + i1 = int_add(i0, 1) + i2 = int_sub(i1, 1) + i3 = int_add(i2, 1) + i4 = int_mul(i2, i3) + jump(i4) + """ + expected = """ + [i0] + i1 = int_add(i0, 1) + i4 = int_mul(i0, i1) + jump(i4) + """ + self.optimize_loop(ops, 'Not', 
expected) + + def test_addsub_int(self): + ops = """ + [i0, i10] + i1 = int_add(i0, i10) + i2 = int_sub(i1, i10) + i3 = int_add(i2, i10) + i4 = int_add(i2, i3) + jump(i4, i10) + """ + expected = """ + [i0, i10] + i1 = int_add(i0, i10) + i4 = int_add(i0, i1) + jump(i4, i10) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_addsub_int2(self): + ops = """ + [i0, i10] + i1 = int_add(i10, i0) + i2 = int_sub(i1, i10) + i3 = int_add(i10, i2) + i4 = int_add(i2, i3) + jump(i4, i10) + """ + expected = """ + [i0, i10] + i1 = int_add(i10, i0) + i4 = int_add(i0, i1) + jump(i4, i10) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_framestackdepth_overhead(self): + ops = """ + [p0, i22] + i1 = getfield_gc(p0, descr=valuedescr) + i2 = int_gt(i1, i22) + guard_false(i2) [] + i3 = int_add(i1, 1) + setfield_gc(p0, i3, descr=valuedescr) + i4 = int_sub(i3, 1) + setfield_gc(p0, i4, descr=valuedescr) + i5 = int_gt(i4, i22) + guard_false(i5) [] + i6 = int_add(i4, 1) + i331 = force_token() + i7 = int_sub(i6, 1) + setfield_gc(p0, i7, descr=valuedescr) + jump(p0, i22) + """ + expected = """ + [p0, i22] + i1 = getfield_gc(p0, descr=valuedescr) + i2 = int_gt(i1, i22) + guard_false(i2) [] + i3 = int_add(i1, 1) + i331 = force_token() + setfield_gc(p0, i1, descr=valuedescr) + jump(p0, i22) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_addsub_ovf(self): + ops = """ + [i0] + i1 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_sub_ovf(i1, 5) + guard_no_overflow() [] + jump(i2) + """ + expected = """ + [i0] + i1 = int_add_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_sub(i1, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_subadd_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_add_ovf(i1, 5) + guard_no_overflow() [] + jump(i2) + """ + expected = """ + [i0] + i1 = int_sub_ovf(i0, 10) + guard_no_overflow() [] + i2 = int_add(i1, 5) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_and(self): + ops = """ + [i0] + i1 = int_and(i0, 255) + i2 = int_lt(i1, 500) + guard_true(i2) [] + i3 = int_le(i1, 255) + guard_true(i3) [] + i4 = int_gt(i1, -1) + guard_true(i4) [] + i5 = int_ge(i1, 0) + guard_true(i5) [] + i6 = int_lt(i1, 0) + guard_false(i6) [] + i7 = int_le(i1, -1) + guard_false(i7) [] + i8 = int_gt(i1, 255) + guard_false(i8) [] + i9 = int_ge(i1, 500) + guard_false(i9) [] + i12 = int_lt(i1, 100) + guard_true(i12) [] + i13 = int_le(i1, 90) + guard_true(i13) [] + i14 = int_gt(i1, 10) + guard_true(i14) [] + i15 = int_ge(i1, 20) + guard_true(i15) [] + jump(i1) + """ + expected = """ + [i0] + i1 = int_and(i0, 255) + i12 = int_lt(i1, 100) + guard_true(i12) [] + i13 = int_le(i1, 90) + guard_true(i13) [] + i14 = int_gt(i1, 10) + guard_true(i14) [] + i15 = int_ge(i1, 20) + guard_true(i15) [] + jump(i1) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_subsub_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i2 = int_gt(i1, 1) + guard_true(i2) [] + i3 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i4 = int_gt(i3, 1) + guard_true(i4) [] + jump(i0) + """ + expected = """ + [i0] + i1 = int_sub_ovf(1, i0) + guard_no_overflow() [] + i2 = int_gt(i1, 1) + guard_true(i2) [] + i3 = int_sub(1, i0) + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_eq(self): + ops = """ + [i0, i1] + i2 = int_le(i0, 4) + guard_true(i2) [] + i3 = int_eq(i0, i1) + guard_true(i3) [] + i4 = int_lt(i1, 5) + guard_true(i4) [] + jump(i0, 
i1) + """ + expected = """ + [i0, i1] + i2 = int_le(i0, 4) + guard_true(i2) [] + i3 = int_eq(i0, i1) + guard_true(i3) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_eq_const(self): + ops = """ + [i0] + i1 = int_eq(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_eq(i0, 7) + guard_true(i1) [] + jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_eq_const_not(self): + ops = """ + [i0] + i1 = int_eq(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_eq(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ne_const(self): + ops = """ + [i0] + i1 = int_ne(i0, 7) + guard_false(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_ne(i0, 7) + guard_false(i1) [] + jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ne_const_not(self): + ops = """ + [i0] + i1 = int_ne(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + expected = """ + [i0] + i1 = int_ne(i0, 7) + guard_true(i1) [] + i2 = int_add(i0, 3) + jump(i2) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_bound_ltne(self): + ops = """ + [i0, i1] + i2 = int_lt(i0, 7) + guard_true(i2) [] + i3 = int_ne(i0, 10) + guard_true(i2) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_lt(i0, 7) + guard_true(i2) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_lege_const(self): + ops = """ + [i0] + i1 = int_ge(i0, 7) + guard_true(i1) [] + i2 = int_le(i0, 7) + guard_true(i2) [] + i3 = int_add(i0, 3) + jump(i3) + """ + expected = """ + [i0] + i1 = int_ge(i0, 7) + guard_true(i1) [] + i2 = int_le(i0, 7) + guard_true(i2) [] + jump(10) + + """ + self.optimize_loop(ops, 'Not', expected) + + def test_mul_ovf(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_lt(i1, 5) + guard_true(i3) [] + i4 = int_gt(i1, -10) + guard_true(i4) [] + i5 = int_mul_ovf(i2, i1) + guard_no_overflow() [] + i6 = int_lt(i5, -2550) + guard_false(i6) [] + i7 = int_ge(i5, 1276) + guard_false(i7) [] + i8 = int_gt(i5, 126) + guard_true(i8) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_lt(i1, 5) + guard_true(i3) [] + i4 = int_gt(i1, -10) + guard_true(i4) [] + i5 = int_mul(i2, i1) + i8 = int_gt(i5, 126) + guard_true(i8) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_mul_ovf_before(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i22 = int_add(i2, 1) + i3 = int_mul_ovf(i22, i1) + guard_no_overflow() [] + i4 = int_lt(i3, 10) + guard_true(i4) [] + i5 = int_gt(i3, 2) + guard_true(i5) [] + i6 = int_lt(i1, 0) + guard_false(i6) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i22 = int_add(i2, 1) + i3 = int_mul_ovf(i22, i1) + guard_no_overflow() [] + i4 = int_lt(i3, 10) + guard_true(i4) [] + i5 = int_gt(i3, 2) + guard_true(i5) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_sub_ovf_before(self): + ops = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_sub_ovf(i2, i1) + guard_no_overflow() [] + i4 = int_le(i3, 10) + guard_true(i4) [] + i5 = int_ge(i3, 2) + guard_true(i5) [] + i6 = int_lt(i1, -10) + guard_false(i6) [] + i7 = int_gt(i1, 253) + guard_false(i7) [] + jump(i0, i1) + """ + expected = """ + [i0, i1] + i2 = int_and(i0, 255) + i3 = int_sub_ovf(i2, i1) 
+ guard_no_overflow() [] + i4 = int_le(i3, 10) + guard_true(i4) [] + i5 = int_ge(i3, 2) + guard_true(i5) [] + jump(i0, i1) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + + + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): ## def test_instanceof(self): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py Tue Sep 14 16:34:16 2010 @@ -612,9 +612,88 @@ driver.can_enter_jit(codeno=codeno, i=i, j=j) portal(2, 50) - self.meta_interp(portal, [2, 20], inline=True) - self.check_loops(call_assembler=0, call_may_force=1, - everywhere=True) + + from pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(portal, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace + + def test_recursion_cant_call_assembler_directly_with_virtualizable(self): + # exactly the same logic as the previous test, but with 'frame.j' + # instead of just 'j' + class Frame(object): + _virtualizable2_ = ['j'] + def __init__(self, j): + self.j = j + + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + virtualizables = ['frame'], + get_printable_location = lambda codeno : str(codeno)) + + def portal(codeno, frame): + i = 0 + while 1: + driver.jit_merge_point(codeno=codeno, i=i, frame=frame) + if i == 1: + if frame.j == 0: + return + portal(2, Frame(frame.j - 1)) + elif i == 3: + return + i += 1 + driver.can_enter_jit(codeno=codeno, i=i, frame=frame) + + def main(codeno, j): + portal(codeno, Frame(j)) + + main(2, 50) + + from pypy.jit.metainterp import compile, pyjitpl + pyjitpl._warmrunnerdesc = None + trace = [] + def my_ctc(*args): + looptoken = original_ctc(*args) + trace.append(looptoken) + return looptoken + original_ctc = compile.compile_tmp_callback + try: + compile.compile_tmp_callback = my_ctc + self.meta_interp(main, [2, 20], inline=True) + self.check_loops(call_assembler=1, call_may_force=0, + everywhere=True) + finally: + compile.compile_tmp_callback = original_ctc + # check that we made a temporary callback + assert len(trace) == 1 + # and that we later redirected it to something else + try: + redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler + except AttributeError: + pass # not the llgraph backend + else: + print redirected + assert redirected.keys() == trace def test_directly_call_assembler_return(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py 
Tue Sep 14 16:34:16 2010 @@ -1,7 +1,7 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi -from pypy.jit.metainterp.optimizeopt import VirtualValue, OptValue, VArrayValue -from pypy.jit.metainterp.optimizeopt import VStructValue +from pypy.jit.metainterp.optimizeopt.virtualize import VirtualValue, OptValue, VArrayValue +from pypy.jit.metainterp.optimizeopt.virtualize import VStructValue from pypy.jit.metainterp.resume import * from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt from pypy.jit.metainterp.history import ConstPtr, ConstFloat Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_warmstate.py Tue Sep 14 16:34:16 2010 @@ -162,13 +162,16 @@ assert cell1.entry_loop_token == "entry loop token" def test_make_jitdriver_callbacks_1(): + class FakeWarmRunnerDesc: + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None class FakeCell: dont_trace_here = False - state = WarmEnterState(None, FakeJitDriverSD()) + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) def jit_getter(build, *args): return FakeCell() state.jit_getter = jit_getter @@ -185,10 +188,12 @@ lltype.Ptr(rstr.STR))) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -205,13 +210,37 @@ lltype.Signed], lltype.Bool)) class FakeWarmRunnerDesc: rtyper = None + cpu = None class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) + _can_never_inline_ptr = None _get_jitcell_at_ptr = None state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.confirm_enter_jit(5, 42.5, 3) assert res is True + +def test_make_jitdriver_callbacks_5(): + def can_never_inline(x, y): + assert x == 5 + assert y == 42.5 + return True + CAN_NEVER_INLINE = lltype.Ptr(lltype.FuncType( + [lltype.Signed, lltype.Float], lltype.Bool)) + class FakeWarmRunnerDesc: + rtyper = None + cpu = None + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed, lltype.Float] + _get_printable_location_ptr = None + _confirm_enter_jit_ptr = None + _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) + _get_jitcell_at_ptr = None + + state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + state.make_jitdriver_callbacks() + res = state.can_never_inline(5, 42.5) + assert res is True Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py Tue Sep 14 16:34:16 2010 @@ -425,6 +425,8 @@ jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.confirm_enter_jit, annmodel.s_Bool, onlygreens=False) + jd._can_never_inline_ptr = 
self._make_hook_graph(jd, + annhelper, jd.jitdriver.can_never_inline, annmodel.s_Bool) annhelper.finish() def _make_hook_graph(self, jitdriver_sd, annhelper, func, @@ -455,6 +457,7 @@ jd._green_args_spec = [v.concretetype for v in greens_v] jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) + jd.num_red_args = len(jd._red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) @@ -582,9 +585,11 @@ ts = self.cpu.ts def ll_portal_runner(*args): + start = True while 1: try: - jd._maybe_enter_from_start_fn(*args) + if start: + jd._maybe_enter_from_start_fn(*args) return support.maybe_on_top_of_llinterp(rtyper, portal_ptr)(*args) except self.ContinueRunningNormally, e: @@ -593,6 +598,8 @@ x = getattr(e, attrname)[count] x = specialize_value(ARGTYPE, x) args = args + (x,) + start = False + continue except self.DoneWithThisFrameVoid: assert result_kind == 'void' return @@ -648,7 +655,7 @@ jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE, ll_portal_runner) jd.portal_runner_adr = llmemory.cast_ptr_to_adr(jd.portal_runner_ptr) - self.cpu.portal_calldescr = self.cpu.calldescrof( + jd.portal_calldescr = self.cpu.calldescrof( jd._PTR_PORTAL_FUNCTYPE.TO, jd._PTR_PORTAL_FUNCTYPE.TO.ARGS, jd._PTR_PORTAL_FUNCTYPE.TO.RESULT) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py Tue Sep 14 16:34:16 2010 @@ -211,7 +211,11 @@ entry_loop_token): cell = self.jit_cell_at_key(greenkey) cell.counter = -1 + old_token = cell.entry_loop_token cell.entry_loop_token = entry_loop_token + if old_token is not None: + cpu = self.warmrunnerdesc.cpu + cpu.redirect_call_assembler(old_token, entry_loop_token) # ---------- @@ -491,8 +495,12 @@ # unwrap_greenkey = self.make_unwrap_greenkey() jit_getter = self.make_jitcell_getter() + jd = self.jitdriver_sd + cpu = self.warmrunnerdesc.cpu def can_inline_greenargs(*greenargs): + if can_never_inline(*greenargs): + return False cell = jit_getter(False, *greenargs) if cell is not None and cell.dont_trace_here: return False @@ -503,11 +511,13 @@ self.can_inline_greenargs = can_inline_greenargs self.can_inline_callable = can_inline_callable - def get_assembler_token(greenkey): - greenargs = unwrap_greenkey(greenkey) - cell = jit_getter(False, *greenargs) - if cell is None or cell.counter >= 0: - return None + def get_assembler_token(greenkey, redboxes): + # 'redboxes' is only used to know the types of red arguments + cell = self.jit_cell_at_key(greenkey) + if cell.entry_loop_token is None: + from pypy.jit.metainterp.compile import compile_tmp_callback + cell.entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, + redboxes) return cell.entry_loop_token self.get_assembler_token = get_assembler_token @@ -546,3 +556,16 @@ confirm_enter_jit_ptr) return fn(*args) self.confirm_enter_jit = confirm_enter_jit + # + can_never_inline_ptr = self.jitdriver_sd._can_never_inline_ptr + if can_never_inline_ptr is None: + def can_never_inline(*greenargs): + return False + else: + rtyper = self.warmrunnerdesc.rtyper + # + def can_never_inline(*greenargs): + fn = support.maybe_on_top_of_llinterp(rtyper, + can_never_inline_ptr) + return fn(*greenargs) + self.can_never_inline = can_never_inline 
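
The warmstate changes above can be condensed into a standalone sketch of the inlining decision; JitCellSketch and WarmStateSketch are invented names, and the predicate plays the role of the new can_never_inline hook, consulted before the per-cell dont_trace_here flag and defaulting to "never forbid" when no hook is given.

    class JitCellSketch(object):
        dont_trace_here = False

    class WarmStateSketch(object):
        def __init__(self, can_never_inline=None):
            # mirrors the default used when _can_never_inline_ptr is None
            self.can_never_inline = can_never_inline or (lambda *greenargs: False)
            self.cells = {}
        def can_inline_greenargs(self, *greenargs):
            if self.can_never_inline(*greenargs):
                return False
            cell = self.cells.get(greenargs)
            if cell is not None and cell.dont_trace_here:
                return False
            return True

    state = WarmStateSketch(can_never_inline=lambda x: x > 50)
    assert state.can_inline_greenargs(3)        # small green arg: inlining allowed
    assert not state.can_inline_greenargs(61)   # hook forbids inlining
    cell = JitCellSketch(); cell.dont_trace_here = True
    state.cells[(3,)] = cell
    assert not state.can_inline_greenargs(3)    # dont_trace_here also forbids it
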
Modified: pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py Tue Sep 14 16:34:16 2010 @@ -250,13 +250,14 @@ class Counts(dict): pass -def main(loopfile, options, view=True): +def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [line.rsplit(':', 1) for line in countname.readlines()] - counts = Counts([(k, int(v.strip('\n'))) for k, v in counts]) + counts = [re.split(r' +', line, 1) for line in countname.readlines()] + counts = Counts([(k.strip("\n"), int(v.strip('\n'))) + for v, k in counts]) l = list(sorted(counts.values())) - if len(l) > 20 and options.use_threshold: + if len(l) > 20 and use_threshold: counts.threshold = l[-20] else: counts.threshold = 0 @@ -274,7 +275,7 @@ if __name__ == '__main__': parser = optparse.OptionParser(usage=__doc__) parser.add_option('--use-threshold', dest='use_threshold', - action="store_true") + action="store_true", default=False) options, args = parser.parse_args(sys.argv) if len(args) != 2: print __doc__ Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_func.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/interp_func.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/interp_func.py Tue Sep 14 16:34:16 2010 @@ -280,7 +280,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(space)]) + addr.as_object(-1, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) getaddrinfo.unwrap_spec = [ObjSpace, W_Root, W_Root, int, int, int, int] Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py Tue Sep 14 16:34:16 2010 @@ -24,7 +24,7 @@ try: sock, addr = self.accept(W_RSocket) return space.newtuple([space.wrap(sock), - addr.as_object(space)]) + addr.as_object(sock.fd, space)]) except SocketError, e: raise converted_error(space, e) accept_w.unwrap_spec = ['self', ObjSpace] @@ -109,7 +109,7 @@ """ try: addr = self.getpeername() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getpeername_w.unwrap_spec = ['self', ObjSpace] @@ -122,7 +122,7 @@ """ try: addr = self.getsockname() - return addr.as_object(space) + return addr.as_object(self.fd, space) except SocketError, e: raise converted_error(space, e) getsockname_w.unwrap_spec = ['self', ObjSpace] @@ -202,7 +202,7 @@ try: data, addr = self.recvfrom(buffersize, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(data), w_addr]) @@ -330,7 +330,7 @@ try: readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags) if addr: - w_addr = addr.as_object(space) + w_addr = addr.as_object(self.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(readlgt), w_addr]) Modified: pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py Tue Sep 14 16:34:16 2010 @@ -2,6 +2,8 @@ import sys import py from pypy.tool.udir import udir +from pypy.rlib import rsocket +from pypy.rpython.lltypesystem import lltype, rffi def setup_module(mod): mod.space = gettestobjspace(usemodules=['_socket', 'array']) @@ -221,21 +223,45 @@ "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info -def test_unknown_addr_as_object(): - from pypy.rlib import rsocket - from pypy.rpython.lltypesystem import lltype, rffi - +def test_unknown_addr_as_object(): c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') c_addr.c_sa_data[0] = 'c' rffi.setintfield(c_addr, 'c_sa_family', 15) # XXX what size to pass here? for the purpose of this test it has # to be short enough so we have some data, 1 sounds good enough # + sizeof USHORT - w_obj = rsocket.Address(c_addr, 1 + 2).as_object(space) + w_obj = rsocket.Address(c_addr, 1 + 2).as_object(-1, space) assert space.is_true(space.isinstance(w_obj, space.w_tuple)) assert space.int_w(space.getitem(w_obj, space.wrap(0))) == 15 assert space.str_w(space.getitem(w_obj, space.wrap(1))) == 'c' +def test_addr_raw_packet(): + if not hasattr(rsocket._c, 'sockaddr_ll'): + py.test.skip("posix specific test") + c_addr_ll = lltype.malloc(rsocket._c.sockaddr_ll, flavor='raw') + addrlen = rffi.sizeof(rsocket._c.sockaddr_ll) + c_addr = rffi.cast(lltype.Ptr(rsocket._c.sockaddr), c_addr_ll) + rffi.setintfield(c_addr_ll, 'c_sll_ifindex', 1) + rffi.setintfield(c_addr_ll, 'c_sll_protocol', 8) + rffi.setintfield(c_addr_ll, 'c_sll_pkttype', 13) + rffi.setintfield(c_addr_ll, 'c_sll_hatype', 0) + rffi.setintfield(c_addr_ll, 'c_sll_halen', 3) + c_addr_ll.c_sll_addr[0] = 'a' + c_addr_ll.c_sll_addr[1] = 'b' + c_addr_ll.c_sll_addr[2] = 'c' + rffi.setintfield(c_addr, 'c_sa_family', socket.AF_PACKET) + # fd needs to be somehow valid + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + fd = s.fileno() + w_obj = rsocket.make_address(c_addr, addrlen).as_object(fd, space) + assert space.is_true(space.eq(w_obj, space.newtuple([ + space.wrap('lo'), + space.wrap(socket.ntohs(8)), + space.wrap(13), + space.wrap(False), + space.wrap("abc"), + ]))) + def test_getnameinfo(): host = "127.0.0.1" port = 25 Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Tue Sep 14 16:34:16 2010 @@ -18,6 +18,9 @@ # need of winsock2. Remove this when separate compilation is # available... 'winsock2.h', + # wincrypt.h defines X509_NAME, include it here + # so that openssl/ssl.h can repair this nonsense. 
+ 'wincrypt.h', 'openssl/ssl.h', 'openssl/err.h'] else: @@ -88,18 +91,12 @@ globals()[k] = v # opaque structures -SSL_METHOD = rffi.VOIDP -SSL_CTX = rffi.VOIDP -SSL = rffi.VOIDP -BIO = rffi.VOIDP -X509 = rffi.VOIDP -X509_NAME = rffi.VOIDP - -SSL_CTX_P = rffi.CArrayPtr(SSL_CTX) -BIO_P = rffi.CArrayPtr(BIO) -SSL_P = rffi.CArrayPtr(SSL) -X509_P = rffi.CArrayPtr(X509) -X509_NAME_P = rffi.CArrayPtr(X509_NAME) +SSL_METHOD = rffi.COpaquePtr('SSL_METHOD') +SSL_CTX = rffi.COpaquePtr('SSL_CTX') +SSL = rffi.COpaquePtr('SSL') +BIO = rffi.COpaquePtr('BIO') +X509 = rffi.COpaquePtr('X509') +X509_NAME = rffi.COpaquePtr('X509_NAME') HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500f @@ -125,33 +122,33 @@ ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void) ssl_external('RAND_status', [], rffi.INT) ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_new', [rffi.CArrayPtr(SSL_METHOD)], SSL_CTX_P) -ssl_external('SSLv23_method', [], rffi.CArrayPtr(SSL_METHOD)) -ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX_P, rffi.CCHARP], rffi.INT) -ssl_external('SSL_CTX_ctrl', [SSL_CTX_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_CTX_set_verify', [SSL_CTX_P, rffi.INT, rffi.VOIDP], lltype.Void) -ssl_external('SSL_new', [SSL_CTX_P], SSL_P) -ssl_external('SSL_set_fd', [SSL_P, rffi.INT], rffi.INT) -ssl_external('BIO_ctrl', [BIO_P, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) -ssl_external('SSL_get_rbio', [SSL_P], BIO_P) -ssl_external('SSL_get_wbio', [SSL_P], BIO_P) -ssl_external('SSL_set_connect_state', [SSL_P], lltype.Void) -ssl_external('SSL_connect', [SSL_P], rffi.INT) -ssl_external('SSL_get_error', [SSL_P, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_new', [SSL_METHOD], SSL_CTX) +ssl_external('SSLv23_method', [], SSL_METHOD) +ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_ctrl', [SSL_CTX, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_CTX_set_verify', [SSL_CTX, rffi.INT, rffi.VOIDP], lltype.Void) +ssl_external('SSL_new', [SSL_CTX], SSL) +ssl_external('SSL_set_fd', [SSL, rffi.INT], rffi.INT) +ssl_external('BIO_ctrl', [BIO, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) +ssl_external('SSL_get_rbio', [SSL], BIO) +ssl_external('SSL_get_wbio', [SSL], BIO) +ssl_external('SSL_set_connect_state', [SSL], lltype.Void) +ssl_external('SSL_connect', [SSL], rffi.INT) +ssl_external('SSL_get_error', [SSL, rffi.INT], rffi.INT) ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) -ssl_external('SSL_get_peer_certificate', [SSL_P], X509_P) -ssl_external('X509_get_subject_name', [X509_P], X509_NAME_P) -ssl_external('X509_get_issuer_name', [X509_P], X509_NAME_P) -ssl_external('X509_NAME_oneline', [X509_NAME_P, rffi.CCHARP, rffi.INT], rffi.CCHARP) -ssl_external('X509_free', [X509_P], lltype.Void) -ssl_external('SSL_free', [SSL_P], lltype.Void) -ssl_external('SSL_CTX_free', [SSL_CTX_P], lltype.Void) -ssl_external('SSL_write', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_pending', [SSL_P], rffi.INT) -ssl_external('SSL_read', [SSL_P, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_get_peer_certificate', [SSL], X509) +ssl_external('X509_get_subject_name', [X509], X509_NAME) +ssl_external('X509_get_issuer_name', [X509], X509_NAME) 
+ssl_external('X509_NAME_oneline', [X509_NAME, rffi.CCHARP, rffi.INT], rffi.CCHARP) +ssl_external('X509_free', [X509], lltype.Void) +ssl_external('SSL_free', [SSL], lltype.Void) +ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void) +ssl_external('SSL_write', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_pending', [SSL], rffi.INT) +ssl_external('SSL_read', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) def ssl_error(space, msg): w_module = space.getbuiltinmodule('_ssl') @@ -212,9 +209,9 @@ def __init__(self, space): self.space = space self.w_socket = None - self.ctx = lltype.nullptr(SSL_CTX_P.TO) - self.ssl = lltype.nullptr(SSL_P.TO) - self.server_cert = lltype.nullptr(X509_P.TO) + self.ctx = lltype.nullptr(SSL_CTX.TO) + self.ssl = lltype.nullptr(SSL.TO) + self.server_cert = lltype.nullptr(X509.TO) self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') self._server[0] = '\0' self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') Modified: pypy/branch/fast-forward/pypy/module/_weakref/interp__weakref.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_weakref/interp__weakref.py (original) +++ pypy/branch/fast-forward/pypy/module/_weakref/interp__weakref.py Tue Sep 14 16:34:16 2010 @@ -118,7 +118,7 @@ try: w_self.space.call_function(w_self.w_callable, w_self) except OperationError, e: - e.write_unraisable(w_self.space, 'function', w_self.w_callable) + e.write_unraisable(w_self.space, 'weakref callback ', w_self.w_callable) class W_Weakref(W_WeakrefBase): Modified: pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py (original) +++ pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py Tue Sep 14 16:34:16 2010 @@ -20,16 +20,19 @@ self.Close(space) descr_del.unwrap_spec = ['self', ObjSpace] + def as_int(self): + return rffi.cast(rffi.SIZE_T, self.hkey) + def descr_nonzero(self, space): - return space.wrap(self.hkey != 0) + return space.wrap(self.as_int() != 0) descr_nonzero.unwrap_spec = ['self', ObjSpace] def descr_repr(self, space): - return space.wrap("" % (self.hkey,)) + return space.wrap("" % (self.as_int(),)) descr_repr.unwrap_spec = ['self', ObjSpace] def descr_int(self, space): - return space.wrap(self.hkey) + return space.wrap(self.as_int()) descr_int.unwrap_spec = ['self', ObjSpace] def Close(self, space): @@ -49,12 +52,13 @@ need the underlying win32 handle to exist beyond the lifetime of the handle object. 
On 64 bit windows, the result of this function is a long integer""" - hkey = self.hkey - self.hkey = 0 - return space.wrap(hkey) + key = self.as_int() + self.hkey = rwin32.NULL_HANDLE + return space.wrap(key) Detach.unwrap_spec = ['self', ObjSpace] -def new_HKEY(space, w_subtype, hkey): +def new_HKEY(space, w_subtype, key): + hkey = rffi.cast(rwinreg.HKEY, key) return space.wrap(W_HKEY(hkey)) descr_HKEY_new = interp2app(new_HKEY, unwrap_spec=[ObjSpace, W_Root, int]) @@ -98,9 +102,9 @@ elif isinstance(w_hkey, W_HKEY): return w_hkey.hkey elif space.is_true(space.isinstance(w_hkey, space.w_int)): - return space.int_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey)) elif space.is_true(space.isinstance(w_hkey, space.w_long)): - return space.uint_w(w_hkey) + return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey)) else: errstring = space.wrap("The object is not a PyHKEY object") raise OperationError(space.w_TypeError, errstring) @@ -631,8 +635,8 @@ null_dword, ft) if ret != 0: raiseWindowsError(space, ret, 'RegQueryInfoKey') - l = (ft[0].c_dwLowDateTime + - (ft[0].c_dwHighDateTime << 32)) + l = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) + + lltype.r_longlong(ft[0].c_dwLowDateTime)) return space.newtuple([space.wrap(nSubKeys[0]), space.wrap(nValues[0]), space.wrap(l)]) Modified: pypy/branch/fast-forward/pypy/module/array/interp_array.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/array/interp_array.py (original) +++ pypy/branch/fast-forward/pypy/module/array/interp_array.py Tue Sep 14 16:34:16 2010 @@ -26,6 +26,11 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) typecode = typecode[0] + if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)): + if len(w_args.keywords_w) > 0: + msg = 'array.array() does not take keyword arguments' + raise OperationError(space.w_TypeError, space.wrap(msg)) + for tc in unroll_typecodes: if typecode == tc: a = space.allocate_instance(types[tc].w_class, w_cls) @@ -99,7 +104,6 @@ def register(typeorder): typeorder[W_ArrayBase] = [] - class TypeCode(object): def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): self.itemtype = itemtype @@ -646,12 +650,6 @@ s = "array('%s', %s)" % (self.typecode, space.str_w(r)) return space.wrap(s) - init_signature = Signature(['typecode', 'initializer']) - init_defaults = [None, None] - - def init__Array(space, self, args): - args.parse_obj(None, 'array', init_signature, init_defaults) - mytype.w_class = W_Array # Annotator seems to mess up if the names are not unique Modified: pypy/branch/fast-forward/pypy/module/array/test/test_array.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/array/test/test_array.py (original) +++ pypy/branch/fast-forward/pypy/module/array/test/test_array.py Tue Sep 14 16:34:16 2010 @@ -483,7 +483,6 @@ b = self.array(t, v1) c = self.array(t, v2) - print (a==7) assert (a == 7) is False assert (comparable() == a) is True assert (a == comparable()) is True @@ -697,7 +696,6 @@ assert isinstance(self.array(t), self.array) def test_subclass(self): - print type(self.array('b')) assert len(self.array('b')) == 0 a = self.array('i') @@ -708,14 +706,43 @@ class adder(array): def __getitem__(self, i): - print 25 return array.__getitem__(self, i) + 1 a = adder('i', (1, 2, 3)) - print type(a) assert len(a) == 3 assert a[0] == 2 + def test_subclass_new(self): + array = self.array + class Image(array): + def __new__(cls, width, 
height, typecode='d'): + self = array.__new__(cls, typecode, [0] * (width * height)) + self.width = width + self.height = height + return self + + def _index(self, (x,y)): + x = min(max(x, 0), self.width-1) + y = min(max(y, 0), self.height-1) + return y * self.width + x + + def __getitem__(self, i): + return array.__getitem__(self, self._index(i)) + + def __setitem__(self, i, val): + return array.__setitem__(self, self._index(i), val) + + img = Image(5, 10, 'B') + for y in range(10): + for x in range(5): + img[x, y] = x * y + for y in range(10): + for x in range(5): + assert img[x, y] == x * y + + assert img[3, 25] == 3 * 9 + + def test_override_from(self): class mya(self.array): def fromlist(self, lst): Modified: pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/interp_jit.py Tue Sep 14 16:34:16 2010 @@ -36,11 +36,13 @@ bytecode.jit_cells[next_instr] = newcell def confirm_enter_jit(next_instr, bytecode, frame, ec): - return (not (bytecode.co_flags & CO_GENERATOR) and - frame.w_f_trace is None and + return (frame.w_f_trace is None and ec.profilefunc is None and ec.w_tracefunc is None) +def can_never_inline(next_instr, bytecode): + return (bytecode.co_flags & CO_GENERATOR) != 0 + class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] @@ -58,7 +60,8 @@ pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, - confirm_enter_jit = confirm_enter_jit) + confirm_enter_jit = confirm_enter_jit, + can_never_inline = can_never_inline) class __extend__(PyFrame): Modified: pypy/branch/fast-forward/pypy/module/pypyjit/policy.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/policy.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/policy.py Tue Sep 14 16:34:16 2010 @@ -32,8 +32,6 @@ return False if mod.startswith('pypy.interpreter.pyparser.'): return False - if mod == 'pypy.interpreter.generator': - return False if mod.startswith('pypy.module.'): modname = mod[len('pypy.module.'):] if not self.look_inside_pypy_module(modname): Modified: pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py Tue Sep 14 16:34:16 2010 @@ -110,6 +110,7 @@ if sys.platform.startswith('win'): py.test.skip("XXX this is not Windows-friendly") + print logfilepath child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( logfilepath, self.pypy_c, filepath), 'r') result = child_stdout.read() @@ -118,6 +119,7 @@ assert result.splitlines()[-1].strip() == 'OK :-)' self.parse_loops(logfilepath) self.print_loops() + print logfilepath if self.total_ops > expected_max_ops: assert 0, "too many operations: got %d, expected maximum %d" % ( self.total_ops, expected_max_ops) @@ -846,6 +848,221 @@ return intimg[i - 1] ''', maxops, ([tc], res)) + def test_intbound_simple(self): + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 
2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 1500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + + exec(str(py.code.Source(src))) + res = [0] * 4 + for i in range(15): + res[f(i)] += 1500 + self.run_source(src, 268, ([], res)) + + def test_intbound_addsub_mix(self): + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + print t1, t2 + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 1500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + + exec(str(py.code.Source(src))) + res = [0] * 4 + for i in range(15): + res[f(i)] += 1500 + self.run_source(src, 280, ([], res)) + + def test_intbound_gt(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + ''', 48, ([], (2000, 2000))) + + def test_intbound_sub_lt(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i - 10 < 1995: + a += 1 + i += 1 + return (a, b) + ''', 38, ([], (2000, 0))) + + def test_intbound_addsub_ge(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + ''', 56, ([], (2000, 2000))) + + def test_intbound_addmul_ge(self): + self.run_source(''' + def main(): + i, a, b = 0, 0, 0 + while i < 2000: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + ''', 53, ([], (2000, 2000))) + + def test_intbound_eq(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) + + def test_intbound_mul(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + ''', 43, ([7], 1500)) + + def test_assert(self): + self.run_source(''' + def main(a): + i, s = 0, 0 + while i < 1500: + assert a == 7 + s += a + 1 + i += 1 + return s + ''', 38, ([7], 8*1500)) + + def test_zeropadded(self): + self.run_source(''' + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= self.__len__(): + return 0 + return array.__getitem__(self, i) + + + def main(): + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + ''', 232, ([], 9895050.0)) + + def test_circular(self): + self.run_source(''' + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + # assert self.__len__() == 256 (FIXME: does not improve) + return array.__getitem__(self, i & 255) + + def main(): + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + ''', 170, ([], 1239690.0)) + + + + # test_circular + class AppTestJIT(PyPyCJITTests): def setup_class(cls): if not option.runappdirect: Modified: 
pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py Tue Sep 14 16:34:16 2010 @@ -32,6 +32,9 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', + 'netpacket/packet.h', + 'sys/ioctl.h', + 'net/if.h', ) cond_includes = [('AF_NETLINK', 'linux/netlink.h')] libraries = () @@ -190,6 +193,8 @@ FD_CONNECT_BIT FD_CLOSE_BIT WSA_IO_PENDING WSA_IO_INCOMPLETE WSA_INVALID_HANDLE WSA_INVALID_PARAMETER WSA_NOT_ENOUGH_MEMORY WSA_OPERATION_ABORTED + +SIOCGIFNAME '''.split() for name in constant_names: @@ -309,6 +314,19 @@ [('fd', socketfd_type), ('events', rffi.SHORT), ('revents', rffi.SHORT)]) + + CConfig.sockaddr_ll = platform.Struct('struct sockaddr_ll', + [('sll_ifindex', rffi.INT), + ('sll_protocol', rffi.INT), + ('sll_pkttype', rffi.INT), + ('sll_hatype', rffi.INT), + ('sll_addr', rffi.CFixedArray(rffi.CHAR, 8)), + ('sll_halen', rffi.INT)], + ) + + CConfig.ifreq = platform.Struct('struct ifreq', [('ifr_ifindex', rffi.INT), + ('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))]) + if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( @@ -408,6 +426,8 @@ if _POSIX: nfds_t = cConfig.nfds_t pollfd = cConfig.pollfd + sockaddr_ll = cConfig.sockaddr_ll + ifreq = cConfig.ifreq if WIN32: WSAEVENT = cConfig.WSAEVENT WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS @@ -510,6 +530,8 @@ socketpair_t = rffi.CArray(socketfd_type) socketpair = external('socketpair', [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(socketpair_t)], rffi.INT) + ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], + rffi.INT) if _WIN32: ioctlsocket = external('ioctlsocket', Modified: pypy/branch/fast-forward/pypy/rlib/jit.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/jit.py (original) +++ pypy/branch/fast-forward/pypy/rlib/jit.py Tue Sep 14 16:34:16 2010 @@ -253,7 +253,8 @@ def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, - get_printable_location=None, confirm_enter_jit=None): + get_printable_location=None, confirm_enter_jit=None, + can_never_inline=None): if greens is not None: self.greens = greens if reds is not None: @@ -270,6 +271,7 @@ self.set_jitcell_at = set_jitcell_at self.get_printable_location = get_printable_location self.confirm_enter_jit = confirm_enter_jit + self.can_never_inline = can_never_inline def _freeze_(self): return True Modified: pypy/branch/fast-forward/pypy/rlib/rmmap.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rmmap.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rmmap.py Tue Sep 14 16:34:16 2010 @@ -72,6 +72,7 @@ setattr(CConfig, name, rffi_platform.ConstantInteger(name)) from pypy.rlib.rwin32 import HANDLE, LPHANDLE + from pypy.rlib.rwin32 import NULL_HANDLE, INVALID_HANDLE_VALUE from pypy.rlib.rwin32 import DWORD, WORD, DWORD_PTR, LPDWORD from pypy.rlib.rwin32 import BOOL, LPVOID, LPCVOID, LPCSTR, SIZE_T from pypy.rlib.rwin32 import INT, LONG, PLONG @@ -183,7 +184,7 @@ ##_get_osfhandle = winexternal('_get_osfhandle', [INT], LONG) # casting from int to handle did not work, so I changed this # but it should not be so! 
- _get_osfhandle = winexternal('_get_osfhandle', [INT], HANDLE) + _get_osfhandle = winexternal('_get_osfhandle', [INT], rffi.INTPTR_T) GetLastError = winexternal('GetLastError', [], DWORD) VirtualAlloc = winexternal('VirtualAlloc', [rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD], @@ -228,8 +229,7 @@ def _get_error_no(): return rffi.cast(lltype.Signed, GetLastError()) - NULL_HANDLE = rffi.cast(HANDLE, 0) - INVALID_HANDLE = rffi.cast(HANDLE, -1) + INVALID_HANDLE = INVALID_HANDLE_VALUE PAGESIZE = _get_page_size() NULL = lltype.nullptr(PTR.TO) @@ -684,12 +684,11 @@ # assume -1 and 0 both mean invalid file descriptor # to 'anonymously' map memory. if fileno != -1 and fileno != 0: - fh = _get_osfhandle(fileno) - # parts of the C library use HANDLE, others just ints - # XXX hack - made _get_osfhandle compatible - if fh == INVALID_HANDLE: + res = _get_osfhandle(fileno) + if res == rffi.cast(rffi.SSIZE_T, INVALID_HANDLE): errno = _get_error_no() raise OSError(errno, os.strerror(errno)) + fh = rffi.cast(HANDLE, res) # Win9x appears to need us seeked to zero # SEEK_SET = 0 # libc._lseek(fileno, 0, SEEK_SET) Modified: pypy/branch/fast-forward/pypy/rlib/rsocket.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rsocket.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rsocket.py Tue Sep 14 16:34:16 2010 @@ -6,8 +6,7 @@ # Known missing features: # -# - support for non-Linux platforms -# - address families other than AF_INET, AF_INET6, AF_UNIX +# - address families other than AF_INET, AF_INET6, AF_UNIX, AF_PACKET # - methods makefile(), # - SSL # @@ -109,7 +108,7 @@ """ keepalive_until_here(self) - def as_object(self, space): + def as_object(self, fd, space): """Convert the address to an app-level object.""" # If we don't know the address family, don't raise an # exception -- return it as a tuple. 
@@ -200,6 +199,66 @@ # ____________________________________________________________ +if 'AF_PACKET' in constants: + class PacketAddress(Address): + family = AF_PACKET + struct = _c.sockaddr_ll + maxlen = minlen = sizeof(struct) + + def get_ifname(self, fd): + a = self.lock(_c.sockaddr_ll) + p = lltype.malloc(_c.ifreq, flavor='raw') + rffi.setintfield(p, 'c_ifr_ifindex', + rffi.getintfield(a, 'c_sll_ifindex')) + if (_c.ioctl(fd, _c.SIOCGIFNAME, p) == 0): + # eh, the iface name is a constant length array + i = 0 + d = [] + while p.c_ifr_name[i] != '\x00' and i < len(p.c_ifr_name): + d.append(p.c_ifr_name[i]) + i += 1 + ifname = ''.join(d) + else: + ifname = "" + lltype.free(p, flavor='raw') + self.unlock() + return ifname + + def get_protocol(self): + a = self.lock(_c.sockaddr_ll) + res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) + self.unlock() + return res + + def get_pkttype(self): + a = self.lock(_c.sockaddr_ll) + res = rffi.getintfield(a, 'c_sll_pkttype') + self.unlock() + return res + + def get_hatype(self): + a = self.lock(_c.sockaddr_ll) + res = bool(rffi.getintfield(a, 'c_sll_hatype')) + self.unlock() + return res + + def get_addr(self): + a = self.lock(_c.sockaddr_ll) + lgt = rffi.getintfield(a, 'c_sll_halen') + d = [] + for i in range(lgt): + d.append(a.c_sll_addr[i]) + res = "".join(d) + self.unlock() + return res + + def as_object(self, fd, space): + return space.newtuple([space.wrap(self.get_ifname(fd)), + space.wrap(self.get_protocol()), + space.wrap(self.get_pkttype()), + space.wrap(self.get_hatype()), + space.wrap(self.get_addr())]) + class INETAddress(IPAddress): family = AF_INET struct = _c.sockaddr_in @@ -228,7 +287,7 @@ self.get_host() == other.get_host() and self.get_port() == other.get_port()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port())]) @@ -317,7 +376,7 @@ self.get_flowinfo() == other.get_flowinfo() and self.get_scope_id() == other.get_scope_id()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_host()), space.wrap(self.get_port()), space.wrap(self.get_flowinfo()), @@ -421,7 +480,7 @@ return (isinstance(other, UNIXAddress) and self.get_path() == other.get_path()) - def as_object(self, space): + def as_object(self, fd, space): return space.wrap(self.get_path()) def from_object(space, w_address): @@ -456,7 +515,7 @@ def __repr__(self): return '' % (self.get_pid(), self.get_groups()) - def as_object(self, space): + def as_object(self, fd, space): return space.newtuple([space.wrap(self.get_pid()), space.wrap(self.get_groups())]) @@ -613,7 +672,7 @@ # convert an Address into an app-level object def addr_as_object(self, space, address): - return address.as_object(space) + return address.as_object(self.fd, space) # convert an app-level object into an Address # based on the current socket's family Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rwin32.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rwin32.py Tue Sep 14 16:34:16 2010 @@ -81,9 +81,10 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='win') if WIN32: - HANDLE = rffi.ULONG + HANDLE = rffi.COpaquePtr(typedef='HANDLE') LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE + NULL_HANDLE = rffi.cast(HANDLE, 0) INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) Modified: 
pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py Tue Sep 14 16:34:16 2010 @@ -358,9 +358,11 @@ if os.name != 'nt': TYPES.append('mode_t') TYPES.append('pid_t') + TYPES.append('ssize_t') else: MODE_T = lltype.Signed PID_T = lltype.Signed + SSIZE_T = lltype.Signed def populate_inttypes(): names = [] @@ -415,6 +417,7 @@ # ULONGLONG r_ulonglong # WCHAR_T r_wchar_t # SIZE_T r_size_t +# SSIZE_T r_ssize_t # TIME_T r_time_t # -------------------------------------------------------------------- # Note that rffi.r_int is not necessarily the same as @@ -535,6 +538,8 @@ # (use SIGNEDCHAR or UCHAR for the small integer types) CHAR = lltype.Char +INTPTR_T = SSIZE_T + # double DOUBLE = lltype.Float Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_os.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_os.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_os.py Tue Sep 14 16:34:16 2010 @@ -1046,7 +1046,7 @@ rffi.VOIDP, rwin32.DWORD], rwin32.BOOL) - _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.ULONG, + _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.INTPTR_T, rffi.INT], rffi.INT) null = lltype.nullptr(rffi.VOIDP.TO) @@ -1059,8 +1059,8 @@ error = 0 else: error = rwin32.GetLastError() - hread = pread[0] - hwrite = pwrite[0] + hread = rffi.cast(rffi.INTPTR_T, pread[0]) + hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) lltype.free(pwrite, flavor='raw') lltype.free(pread, flavor='raw') if error: Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_os_stat.py Tue Sep 14 16:34:16 2010 @@ -437,5 +437,5 @@ def time_t_to_FILE_TIME(time, filetime): ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & ((1 << 32) - 1)) + filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) Modified: pypy/branch/fast-forward/pypy/rpython/module/ll_win32file.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/module/ll_win32file.py (original) +++ pypy/branch/fast-forward/pypy/rpython/module/ll_win32file.py Tue Sep 14 16:34:16 2010 @@ -265,7 +265,8 @@ hFile = CreateFile(path, FILE_WRITE_ATTRIBUTES, 0, None, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, 0) + FILE_FLAG_BACKUP_SEMANTICS, + rwin32.NULL_HANDLE) if hFile == rwin32.INVALID_HANDLE_VALUE: raise rwin32.lastWindowsError() ctime = lltype.nullptr(rwin32.FILETIME) Modified: pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py (original) +++ pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py Tue Sep 14 16:34:16 2010 @@ -11,6 +11,7 @@ from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import CompilationError from pypy.tool.udir import udir +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong, intmask # 
____________________________________________________________ # @@ -380,7 +381,7 @@ yield '}' def build_result(self, info, config_result): - return info['value'] + return expose_value_as_rpython(info['value']) class DefinedConstantInteger(CConfigEntry): """An entry in a CConfig class that stands for an externally @@ -406,7 +407,7 @@ def build_result(self, info, config_result): if info["defined"]: - return info['value'] + return expose_value_as_rpython(info['value']) return None class DefinedConstantDouble(CConfigEntry): @@ -654,6 +655,20 @@ raise TypeError("conflicting field type %r for %r" % (fieldtype, fieldname)) +def expose_value_as_rpython(value): + if intmask(value) == value: + return value + if r_uint(value) == value: + return r_uint(value) + try: + if r_longlong(value) == value: + return r_longlong(value) + except OverflowError: + pass + if r_ulonglong(value) == value: + return r_ulonglong(value) + raise OverflowError("value %d does not fit into any RPython integer type" + % (value,)) C_HEADER = """ #include Modified: pypy/branch/fast-forward/pypy/rpython/tool/test/test_rffi_platform.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/tool/test/test_rffi_platform.py (original) +++ pypy/branch/fast-forward/pypy/rpython/tool/test/test_rffi_platform.py Tue Sep 14 16:34:16 2010 @@ -5,7 +5,7 @@ from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform -from pypy.rlib import rarithmetic +from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong, isnan def import_ctypes(): try: @@ -117,7 +117,7 @@ value = rffi_platform.getdefineddouble('BLAH', '#define BLAH 1.0e50000') assert value == float("inf") value = rffi_platform.getdefineddouble('BLAH', '#define BLAH (double)0/0') - assert rarithmetic.isnan(value) + assert isnan(value) def test_configure(): test_h = udir.join('test_ctypes_platform.h') @@ -370,3 +370,19 @@ padding = list(S._hints['padding']) d = {'c_c1': 'char'} assert S._hints['get_padding_drop'](d) == padding + +def test_expose_value_as_rpython(): + def get(x): + x = rffi_platform.expose_value_as_rpython(x) + return (x, type(x)) + assert get(5) == (5, int) + assert get(-82) == (-82, int) + assert get(sys.maxint) == (sys.maxint, int) + assert get(sys.maxint+1) == (sys.maxint+1, r_uint) + if sys.maxint == 2147483647: + assert get(9999999999) == (9999999999, r_longlong) + assert get(-9999999999) == (-9999999999, r_longlong) + assert get(2**63) == (2**63, r_ulonglong) + assert get(-2**63) == (-2**63, r_longlong) + py.test.raises(OverflowError, get, -2**63-1) + py.test.raises(OverflowError, get, 2**64) Modified: pypy/branch/fast-forward/pypy/tool/release/force-builds.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/release/force-builds.py (original) +++ pypy/branch/fast-forward/pypy/tool/release/force-builds.py Tue Sep 14 16:34:16 2010 @@ -21,9 +21,9 @@ 'own-linux-x86-64', # 'own-macosx-x86-32', 'pypy-c-app-level-linux-x86-32', - 'pypy-c-app-level-linux-64', + 'pypy-c-app-level-linux-x86-64', 'pypy-c-stackless-app-level-linux-x86-32', - 'pypy-c-app-level-win-32', + 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', # 'pypy-c-jit-macosx-x86-32', 'pypy-c-jit-win-x86-32', Modified: pypy/branch/fast-forward/pypy/translator/c/database.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/translator/c/database.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/database.py Tue Sep 14 16:34:16 2010 @@ -2,7 +2,7 @@ Primitive, Ptr, typeOf, RuntimeTypeInfo, \ Struct, Array, FuncType, PyObject, Void, \ ContainerType, OpaqueType, FixedSizeArray, _uninitialized -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant from pypy.rpython.lltypesystem import llgroup @@ -183,6 +183,12 @@ if isinstance(T, Primitive) or T == GCREF: return PrimitiveName[T](obj, self) elif isinstance(T, Ptr): + if (isinstance(T.TO, OpaqueType) and + T.TO.hints.get('c_pointer_typedef') is not None): + if obj._obj is not None: + value = rffi.cast(rffi.SSIZE_T, obj) + return '((%s) %s)' % (cdecl(self.gettype(T), ''), + self.get(value)) if obj: # test if the ptr is non-NULL try: container = obj._obj From cfbolz at codespeak.net Tue Sep 14 17:54:29 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 14 Sep 2010 17:54:29 +0200 (CEST) Subject: [pypy-svn] r77069 - in pypy/branch/better-map-instances/pypy/rlib: . test Message-ID: <20100914155429.9369F282C0E@codespeak.net> Author: cfbolz Date: Tue Sep 14 17:54:27 2010 New Revision: 77069 Modified: pypy/branch/better-map-instances/pypy/rlib/rerased.py pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Log: whack whack whack, until erasing fixed-size lists of instances works Modified: pypy/branch/better-map-instances/pypy/rlib/rerased.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rlib/rerased.py (original) +++ pypy/branch/better-map-instances/pypy/rlib/rerased.py Tue Sep 14 17:54:27 2010 @@ -23,8 +23,15 @@ res = 2 * x + 1 if res > sys.maxint or res < -sys.maxint - 1: raise OverflowError + assert not isinstance(x, list) return Erased(x) +def erase_fixedsizelist(l, type): + assert isinstance(l, list) + result = Erased(l) + result._list_item_type = type + return result + def unerase(y, type): """Turn an erased object back into an object of type 'type'.""" if y._x is None: @@ -32,6 +39,14 @@ assert isinstance(y._x, type) return y._x +def unerase_fixedsizelist(y, type): + if y._x is None: + return None + assert isinstance(y._x, list) + if y._x: + assert isinstance(y._x[0], type) + return y._x + def is_integer(e): """Gives information whether the erased argument is a tagged integer or not.""" return isinstance(e._x, int) @@ -40,6 +55,7 @@ # ---------- implementation-specific ---------- class Erased(object): + _list_item_type = None def __init__(self, x): self._x = x def __repr__(self): @@ -55,6 +71,18 @@ return hop.r_result.specialize_call(hop) class Entry(ExtRegistryEntry): + _about_ = erase_fixedsizelist + + def compute_result_annotation(self, s_arg, s_type): + # s_type ignored: only for prebuilt erased lists + assert isinstance(s_arg, annmodel.SomeList) + s_arg.listdef.never_resize() + return SomeErased() + + def specialize_call(self, hop): + return hop.r_result.specialize_call(hop) + +class Entry(ExtRegistryEntry): _about_ = unerase def compute_result_annotation(self, s_obj, s_type): @@ -75,6 +103,21 @@ return hop.genop('cast_opaque_ptr', [v], resulttype = hop.r_result) class Entry(ExtRegistryEntry): + _about_ = unerase_fixedsizelist + + def compute_result_annotation(self, s_obj, s_type): + assert isinstance(s_type, annmodel.SomePBC) + assert 
len(s_type.descriptions) == 1 + clsdef = s_type.descriptions.keys()[0].getuniqueclassdef() + s_item = annmodel.SomeInstance(clsdef) + return self.bookkeeper.newlist(s_item) + + def specialize_call(self, hop): + v, t = hop.inputargs(hop.args_r[0], lltype.Void) + return hop.genop('cast_opaque_ptr', [v], resulttype = hop.r_result) + + +class Entry(ExtRegistryEntry): _about_ = is_integer def compute_result_annotation(self, s_obj): @@ -92,16 +135,21 @@ _type_ = Erased def compute_annotation(self): - from pypy.rlib import _jit_vref s_obj = self.bookkeeper.immutablevalue(self.instance._x) + if self.instance._list_item_type is not None: + # only non-resizable lists of instances for now + clsdef = self.bookkeeper.getuniqueclassdef(self.instance._list_item_type) + s_item = annmodel.SomeInstance(clsdef) + s_obj.listdef.generalize(s_item) + self.instance._s_list = s_obj return SomeErased() -# annotation and rtyping support +# annotation and rtyping support class SomeErased(annmodel.SomeObject): - def __init__(self): - pass + def __init__(self, s_obj=None): + self.s_obj = s_obj # only non-None for constants def can_be_none(self): return False # cannot be None, but can contain a None @@ -124,7 +172,7 @@ self.rtyper = rtyper def specialize_call(self, hop): - s_arg, = hop.args_s + s_arg = hop.args_s[0] r_generic_object = getinstancerepr(hop.rtyper, None) if (isinstance(s_arg, annmodel.SomeInstance) or (s_arg.is_constant() and s_arg.const is None)): @@ -133,6 +181,13 @@ v = hop.genop('cast_opaque_ptr', [v_instance], resulttype=self.lowleveltype) return v + elif isinstance(s_arg, annmodel.SomeList): + hop.exception_cannot_occur() + r_list = self.rtyper.getrepr(s_arg) + v_list = hop.inputarg(r_list, 0) + v = hop.genop('cast_opaque_ptr', [v_list], + resulttype=self.lowleveltype) + return v else: assert isinstance(s_arg, annmodel.SomeInteger) v_value = hop.inputarg(lltype.Signed, arg=0) @@ -152,6 +207,10 @@ def convert_const(self, value): if isinstance(value._x, int): return lltype.cast_int_to_ptr(self.lowleveltype, value._x * 2 + 1) + if isinstance(value._x, list): + r_list = self.rtyper.getrepr(value._s_list) + v = r_list.convert_const(value._x) + return lltype.cast_opaque_ptr(self.lowleveltype, v) else: r_generic_object = getinstancerepr(self.rtyper, None) v = r_generic_object.convert_const(value._x) Modified: pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py ============================================================================== --- pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py (original) +++ pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Tue Sep 14 17:54:27 2010 @@ -1,6 +1,6 @@ import py import sys -from pypy.rlib.rerased import erase, unerase, is_integer, SomeErased +from pypy.rlib.rerased import * from pypy.annotation import model as annmodel from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret @@ -39,6 +39,12 @@ py.test.raises(OverflowError, erase, -sys.maxint) py.test.raises(OverflowError, erase, -sys.maxint-1) +def test_list(): + l = [X()] + e = erase_fixedsizelist(l, X) + assert is_integer(e) is False + assert unerase_fixedsizelist(e, X) is l + def test_annotate_1(): def f(): return erase(X()) @@ -133,3 +139,23 @@ s_e2 = SomeErased() s_e2.const = 3 assert not annmodel.pair(s_e1, s_e2).union().is_constant() + + +def test_rtype_list(): + prebuilt_l = [X()] + prebuilt_e = erase_fixedsizelist(prebuilt_l, X) + def l(flag): + if flag == 1: + l = [X()] + e = erase_fixedsizelist(l, X) + elif flag 
== 2: + l = prebuilt_l + e = erase_fixedsizelist(l, X) + else: + l = prebuilt_l + e = prebuilt_e + assert is_integer(e) is False + assert unerase_fixedsizelist(e, X) is l + interpret(l, [0]) + interpret(l, [1]) + interpret(l, [2]) From afa at codespeak.net Tue Sep 14 18:10:24 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 18:10:24 +0200 (CEST) Subject: [pypy-svn] r77070 - in pypy/branch/fast-forward/pypy: rpython rpython/lltypesystem rpython/test translator/c/src Message-ID: <20100914161024.00677282C0F@codespeak.net> Author: afa Date: Tue Sep 14 18:10:23 2010 New Revision: 77070 Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py pypy/branch/fast-forward/pypy/rpython/rfloat.py pypy/branch/fast-forward/pypy/rpython/test/test_rfloat.py pypy/branch/fast-forward/pypy/translator/c/src/float.h Log: Implement r_float <-> r_ulonglong conversions, needed by rlib/rstruct/ieee.py at least. Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py Tue Sep 14 18:10:23 2010 @@ -347,10 +347,12 @@ 'cast_int_to_longlong': LLOp(canfold=True), 'cast_uint_to_int': LLOp(canfold=True), 'cast_uint_to_float': LLOp(canfold=True), - 'cast_longlong_to_float':LLOp(canfold=True), + 'cast_longlong_to_float' :LLOp(canfold=True), + 'cast_ulonglong_to_float':LLOp(canfold=True), 'cast_float_to_int': LLOp(canraise=(OverflowError,), tryfold=True), 'cast_float_to_uint': LLOp(canfold=True), # XXX need OverflowError? - 'cast_float_to_longlong':LLOp(canfold=True), + 'cast_float_to_longlong' :LLOp(canfold=True), + 'cast_float_to_ulonglong':LLOp(canfold=True), 'truncate_longlong_to_int':LLOp(canfold=True), 'force_cast': LLOp(sideeffects=False), # only for rffi.cast() Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/opimpl.py Tue Sep 14 18:10:23 2010 @@ -291,6 +291,13 @@ ui = float(int(i >> 31)) * float(0x80000000) return ui + li +def op_cast_ulonglong_to_float(i): + assert isinstance(i, r_ulonglong) + # take first 32 bits + li = float(int(i & r_ulonglong(0xffffffff))) + ui = float(int(i >> 32)) * float(0x100000000) + return ui + li + def op_cast_int_to_char(b): assert type(b) is int return chr(b) @@ -319,6 +326,10 @@ truncated = int((small - high) * r) return r_longlong_result(high) * 0x100000000 + truncated +def op_cast_float_to_ulonglong(f): + assert type(f) is float + return r_ulonglong(r_longlong(f)) + def op_cast_char_to_int(b): assert type(b) is str and len(b) == 1 return ord(b) Modified: pypy/branch/fast-forward/pypy/rpython/rfloat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rfloat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/rfloat.py Tue Sep 14 18:10:23 2010 @@ -1,7 +1,8 @@ from pypy.tool.pairtype import pairtype from pypy.annotation import model as annmodel -from pypy.rpython.lltypesystem.lltype import \ - Signed, Unsigned, SignedLongLong, Bool, Float, Void, pyobjectptr +from pypy.rpython.lltypesystem.lltype import ( + Signed, Unsigned, 
SignedLongLong, UnsignedLongLong, + Bool, Float, Void, pyobjectptr) from pypy.rpython.error import TyperError from pypy.rpython.rmodel import FloatRepr from pypy.rpython.rmodel import IntegerRepr, BoolRepr @@ -157,6 +158,9 @@ if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float: log.debug('explicit cast_longlong_to_float') return llops.genop('cast_longlong_to_float', [v], resulttype=Float) + if r_from.lowleveltype == UnsignedLongLong and r_to.lowleveltype == Float: + log.debug('explicit cast_ulonglong_to_float') + return llops.genop('cast_ulonglong_to_float', [v], resulttype=Float) return NotImplemented class __extend__(pairtype(FloatRepr, IntegerRepr)): @@ -170,6 +174,9 @@ if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong: log.debug('explicit cast_float_to_longlong') return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong) + if r_from.lowleveltype == Float and r_to.lowleveltype == UnsignedLongLong: + log.debug('explicit cast_float_to_ulonglong') + return llops.genop('cast_float_to_ulonglong', [v], resulttype=UnsignedLongLong) return NotImplemented class __extend__(pairtype(BoolRepr, FloatRepr)): Modified: pypy/branch/fast-forward/pypy/rpython/test/test_rfloat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/test/test_rfloat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/test/test_rfloat.py Tue Sep 14 18:10:23 2010 @@ -2,8 +2,8 @@ from pypy.translator.translator import TranslationContext from pypy.rpython.test import snippet from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin -from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_singlefloat,\ - isnan, isinf +from pypy.rlib.rarithmetic import ( + r_int, r_uint, r_longlong, r_ulonglong, r_singlefloat) from pypy.rlib.objectmodel import compute_hash class TestSnippet(object): @@ -114,6 +114,21 @@ res = self.interpret(fn, [-9]) assert self.float_eq(res, 0.5 * ((sys.maxint+1)*2 - 9)) + def test_to_r_ulonglong(self): + def fn(x): + return r_ulonglong(x) + res = self.interpret(fn, [12.34]) + assert res == 12 + bigval = sys.maxint * 1.234 + res = self.interpret(fn, [bigval]) + assert long(res) == long(bigval) + + def test_from_r_ulonglong(self): + def fn(n): + return float(r_ulonglong(n)) / 2 + res = self.interpret(fn, [41]) + assert self.float_eq(res, 20.5) + def test_r_singlefloat(self): def fn(x): y = r_singlefloat(x) Modified: pypy/branch/fast-forward/pypy/translator/c/src/float.h ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/src/float.h (original) +++ pypy/branch/fast-forward/pypy/translator/c/src/float.h Tue Sep 14 18:10:23 2010 @@ -36,8 +36,10 @@ #define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) #define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) #define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_ULONGLONG_TO_FLOAT(x,r) r = (double)(x) #define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) #ifdef HAVE_LONG_LONG #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) +#define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) #endif From afa at codespeak.net Tue Sep 14 21:57:33 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 21:57:33 +0200 (CEST) Subject: [pypy-svn] r77071 - in pypy/branch/fast-forward/pypy: module/math/test rpython/lltypesystem/module translator/c/src Message-ID: <20100914195733.0FB01282BEA@codespeak.net> 
Author: afa Date: Tue Sep 14 21:57:31 2010 New Revision: 77071 Added: pypy/branch/fast-forward/pypy/translator/c/src/math.c (contents, props changed) Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py Log: Fix the math module on Windows, where C99 functions are not available. Steal the code from CPython. Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/math/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/module/math/test/test_direct.py Tue Sep 14 21:57:31 2010 @@ -21,15 +21,20 @@ unary_math_functions = ['acos', 'asin', 'atan', 'ceil', 'cos', 'cosh', 'exp', 'fabs', 'floor', - 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'log', 'log10'] + 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'log', 'log10', + 'acosh', 'asinh', 'atanh', 'log1p', 'expm1'] binary_math_functions = ['atan2', 'fmod', 'hypot', 'pow'] class MathTests: - REGCASES = [ - (name, (0.3,), getattr(math, name)(0.3)) - for name in unary_math_functions] + REGCASES = [] + for name in unary_math_functions: + try: + input, output = (0.3,), getattr(math, name)(0.3) + except ValueError: + input, output = (1.3,), getattr(math, name)(1.3) + REGCASES.append((name, input, output)) IRREGCASES = [ ('atan2', (0.31, 0.123), math.atan2(0.31, 0.123)), Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py Tue Sep 14 21:57:31 2010 @@ -5,14 +5,24 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name +from pypy.tool.autopath import pypydir from pypy.rlib import rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rarithmetic import isinf, isnan, INFINITY, NAN -if sys.platform[:3] == "win": - eci = ExternalCompilationInfo(libraries=[]) +if sys.platform == "win32": + eci = ExternalCompilationInfo() + # Some math functions are C99 and not defined by the Microsoft compiler + srcdir = py.path.local(pypydir).join('translator', 'c', 'src') + math_eci = ExternalCompilationInfo( + separate_module_files=[srcdir.join('math.c')], + export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', + '_pypy_math_atanh', + '_pypy_math_expm1', '_pypy_math_log1p'], + ) else: - eci = ExternalCompilationInfo(libraries=['m']) + eci = ExternalCompilationInfo( + libraries=['m']) def llexternal(name, ARGS, RESULT): return rffi.llexternal(name, ARGS, RESULT, compilation_info=eci, @@ -284,8 +294,13 @@ # # Default implementations -def new_unary_math_function(name, can_overflow): - c_func = llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) +def new_unary_math_function(name, can_overflow, c99): + if sys.platform == 'win32' and c99: + win32name = '_pypy_math_%s' % (name,) + c_func = rffi.llexternal(win32name, [rffi.DOUBLE], rffi.DOUBLE, + compilation_info=math_eci, sandboxsafe=True) + else: + c_func = llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) def ll_math(x): _error_reset() @@ -316,12 +331,16 @@ 'acos', 'asin', 'atan', 'ceil', 'cos', 'cosh', 'exp', 'fabs', 'floor', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'log', 'log10', - 'acosh', 'asinh', 'atanh', 'log1p', 'expm1' # -- added in Python 2.6 + 'acosh', 'asinh', 'atanh', 
'log1p', 'expm1', ] unary_math_functions_can_overflow = [ 'cosh', 'exp', 'log1p', 'sinh', 'expm1', ] +unary_math_functions_c99 = [ + 'acosh', 'asinh', 'atanh', 'log1p', 'expm1', + ] for name in unary_math_functions: can_overflow = name in unary_math_functions_can_overflow - globals()['ll_math_' + name] = new_unary_math_function(name, can_overflow) + c99 = name in unary_math_functions_c99 + globals()['ll_math_' + name] = new_unary_math_function(name, can_overflow, c99) Added: pypy/branch/fast-forward/pypy/translator/c/src/math.c ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/translator/c/src/math.c Tue Sep 14 21:57:31 2010 @@ -0,0 +1,244 @@ +/* Definitions of some C99 math library functions, for those platforms + that don't implement these functions already. */ + +#include + +/* The following macros are copied from CPython header files */ + +#ifdef _MSC_VER +#include +#define PyPy_IS_NAN _isnan +#define PyPy_IS_INFINITY(X) (!_finite(X) && !_isnan(X)) +#define copysign _copysign +#else +#define PyPy_IS_NAN(X) ((X) != (X)) +#define PyPy_IS_INFINITY(X) ((X) && \ + (Py_FORCE_DOUBLE(X)*0.5 == Py_FORCE_DOUBLE(X))) +#endif + +#undef PyPy_NAN + +/* The following copyright notice applies to the original + implementations of acosh, asinh and atanh. */ + +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +double _pypy_math_log1p(double x); + +static const double ln2 = 6.93147180559945286227E-01; +static const double two_pow_m28 = 3.7252902984619141E-09; /* 2**-28 */ +static const double two_pow_p28 = 268435456.0; /* 2**28 */ +static const double zero = 0.0; + +/* acosh(x) + * Method : + * Based on + * acosh(x) = log [ x + sqrt(x*x-1) ] + * we have + * acosh(x) := log(x)+ln2, if x is large; else + * acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else + * acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1. + * + * Special cases: + * acosh(x) is NaN with signal if x<1. + * acosh(NaN) is NaN without signal. + */ + +double +_pypy_math_acosh(double x) +{ + if (PyPy_IS_NAN(x)) { + return x+x; + } + if (x < 1.) { /* x < 1; return a signaling NaN */ + errno = EDOM; +#ifdef PyPy_NAN + return PyPy_NAN; +#else + return (x-x)/(x-x); +#endif + } + else if (x >= two_pow_p28) { /* x > 2**28 */ + if (PyPy_IS_INFINITY(x)) { + return x+x; + } else { + return log(x)+ln2; /* acosh(huge)=log(2x) */ + } + } + else if (x == 1.) { + return 0.0; /* acosh(1) = 0 */ + } + else if (x > 2.) 
{ /* 2 < x < 2**28 */ + double t = x*x; + return log(2.0*x - 1.0 / (x + sqrt(t - 1.0))); + } + else { /* 1 < x <= 2 */ + double t = x - 1.0; + return _pypy_math_log1p(t + sqrt(2.0*t + t*t)); + } +} + + +/* asinh(x) + * Method : + * Based on + * asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ] + * we have + * asinh(x) := x if 1+x*x=1, + * := sign(x)*(log(x)+ln2)) for large |x|, else + * := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if|x|>2, else + * := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2))) + */ + +double +_pypy_math_asinh(double x) +{ + double w; + double absx = fabs(x); + + if (PyPy_IS_NAN(x) || PyPy_IS_INFINITY(x)) { + return x+x; + } + if (absx < two_pow_m28) { /* |x| < 2**-28 */ + return x; /* return x inexact except 0 */ + } + if (absx > two_pow_p28) { /* |x| > 2**28 */ + w = log(absx)+ln2; + } + else if (absx > 2.0) { /* 2 < |x| < 2**28 */ + w = log(2.0*absx + 1.0 / (sqrt(x*x + 1.0) + absx)); + } + else { /* 2**-28 <= |x| < 2= */ + double t = x*x; + w = _pypy_math_log1p(absx + t / (1.0 + sqrt(1.0 + t))); + } + return copysign(w, x); + +} + +/* atanh(x) + * Method : + * 1.Reduced x to positive by atanh(-x) = -atanh(x) + * 2.For x>=0.5 + * 1 2x x + * atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------) + * 2 1 - x 1 - x + * + * For x<0.5 + * atanh(x) = 0.5*log1p(2x+2x*x/(1-x)) + * + * Special cases: + * atanh(x) is NaN if |x| >= 1 with signal; + * atanh(NaN) is that NaN with no signal; + * + */ + +double +_pypy_math_atanh(double x) +{ + double absx; + double t; + + if (PyPy_IS_NAN(x)) { + return x+x; + } + absx = fabs(x); + if (absx >= 1.) { /* |x| >= 1 */ + errno = EDOM; +#ifdef PyPy_NAN + return PyPy_NAN; +#else + return x/zero; +#endif + } + if (absx < two_pow_m28) { /* |x| < 2**-28 */ + return x; + } + if (absx < 0.5) { /* |x| < 0.5 */ + t = absx+absx; + t = 0.5 * _pypy_math_log1p(t + t*absx / (1.0 - absx)); + } + else { /* 0.5 <= |x| <= 1.0 */ + t = 0.5 * _pypy_math_log1p((absx + absx) / (1.0 - absx)); + } + return copysign(t, x); +} + +/* Mathematically, expm1(x) = exp(x) - 1. The expm1 function is designed + to avoid the significant loss of precision that arises from direct + evaluation of the expression exp(x) - 1, for x near 0. */ + +double +_pypy_math_expm1(double x) +{ + /* For abs(x) >= log(2), it's safe to evaluate exp(x) - 1 directly; this + also works fine for infinities and nans. + + For smaller x, we can use a method due to Kahan that achieves close to + full accuracy. + */ + + if (fabs(x) < 0.7) { + double u; + u = exp(x); + if (u == 1.0) + return x; + else + return (u - 1.0) * x / log(u); + } + else + return exp(x) - 1.0; +} + +/* log1p(x) = log(1+x). The log1p function is designed to avoid the + significant loss of precision that arises from direct evaluation when x is + small. */ + +double +_pypy_math_log1p(double x) +{ + /* For x small, we use the following approach. Let y be the nearest float + to 1+x, then + + 1+x = y * (1 - (y-1-x)/y) + + so log(1+x) = log(y) + log(1-(y-1-x)/y). Since (y-1-x)/y is tiny, the + second term is well approximated by (y-1-x)/y. If abs(x) >= + DBL_EPSILON/2 or the rounding-mode is some form of round-to-nearest + then y-1-x will be exactly representable, and is computed exactly by + (y-1)-x. + + If abs(x) < DBL_EPSILON/2 and the rounding mode is not known to be + round-to-nearest then this method is slightly dangerous: 1+x could be + rounded up to 1+DBL_EPSILON instead of down to 1, and in that case + y-1-x will not be exactly representable any more and the result can be + off by many ulps. 
But this is easily fixed: for a floating-point + number |x| < DBL_EPSILON/2., the closest floating-point number to + log(1+x) is exactly x. + */ + + double y; + if (fabs(x) < DBL_EPSILON/2.) { + return x; + } else if (-0.5 <= x && x <= 1.) { + /* WARNING: it's possible than an overeager compiler + will incorrectly optimize the following two lines + to the equivalent of "return log(1.+x)". If this + happens, then results from log1p will be inaccurate + for small x. */ + y = 1.+x; + return log(y)-((y-1.)-x)/y; + } else { + /* NaNs and infinities should end up here */ + return log(1.+x); + } +} From afa at codespeak.net Tue Sep 14 21:59:32 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 21:59:32 +0200 (CEST) Subject: [pypy-svn] r77072 - pypy/branch/fast-forward/pypy/module/math/test Message-ID: <20100914195932.9C426282BEA@codespeak.net> Author: afa Date: Tue Sep 14 21:59:31 2010 New Revision: 77072 Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py Log: Try to let tests pass when run on top of an older CPython Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/math/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/module/math/test/test_direct.py Tue Sep 14 21:59:31 2010 @@ -32,6 +32,9 @@ for name in unary_math_functions: try: input, output = (0.3,), getattr(math, name)(0.3) + except AttributeError: + # cannot test this function + pass except ValueError: input, output = (1.3,), getattr(math, name)(1.3) REGCASES.append((name, input, output)) From afa at codespeak.net Tue Sep 14 22:02:41 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 14 Sep 2010 22:02:41 +0200 (CEST) Subject: [pypy-svn] r77073 - pypy/branch/fast-forward/pypy/module/math/test Message-ID: <20100914200241.D0233282BEA@codespeak.net> Author: afa Date: Tue Sep 14 22:02:40 2010 New Revision: 77073 Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py Log: Fix test Modified: pypy/branch/fast-forward/pypy/module/math/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/math/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/module/math/test/test_direct.py Tue Sep 14 22:02:40 2010 @@ -34,7 +34,7 @@ input, output = (0.3,), getattr(math, name)(0.3) except AttributeError: # cannot test this function - pass + continue except ValueError: input, output = (1.3,), getattr(math, name)(1.3) REGCASES.append((name, input, output)) From benjamin at codespeak.net Tue Sep 14 23:54:28 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Tue, 14 Sep 2010 23:54:28 +0200 (CEST) Subject: [pypy-svn] r77074 - pypy/branch/fast-forward/pypy/rlib/rstruct Message-ID: <20100914215428.F1E93282BEA@codespeak.net> Author: benjamin Date: Tue Sep 14 23:54:26 2010 New Revision: 77074 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Log: mant should always be a float Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Tue Sep 14 23:54:26 2010 @@ -50,18 +50,18 @@ one = r_ulonglong(1) sign = rarithmetic.intmask(Q >> BITS - 1) exp = rarithmetic.intmask((Q & ((one << BITS - 1) - (one << MANT_DIG - 1))) 
>> MANT_DIG - 1) - mant = Q & ((one << MANT_DIG - 1) - 1) + mant = Q & ((1 << MANT_DIG - 1) - 1) if exp == MAX_EXP - MIN_EXP + 2: # nan or infinity result = float('nan') if mant else float('inf') elif exp == 0: # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) + result = math.ldexp(mant, MIN_EXP - MANT_DIG) else: # normal - mant += r_ulonglong(1) << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) + mant += 1 << MANT_DIG - 1 + result = math.ldexp(mant, exp + MIN_EXP - MANT_DIG - 1) return -result if sign else result From benjamin at codespeak.net Wed Sep 15 02:04:22 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Wed, 15 Sep 2010 02:04:22 +0200 (CEST) Subject: [pypy-svn] r77075 - pypy/branch/fast-forward/pypy/rlib/rstruct Message-ID: <20100915000422.0F5CA282BEA@codespeak.net> Author: benjamin Date: Wed Sep 15 02:04:21 2010 New Revision: 77075 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Log: simply Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Wed Sep 15 02:04:21 2010 @@ -21,7 +21,7 @@ """ int_part = r_ulonglong(x) frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: + if frac_part > 0.5 or frac_part == 0.5 and int_part & 1: int_part += 1 return int_part From arigo at codespeak.net Wed Sep 15 10:00:54 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 10:00:54 +0200 (CEST) Subject: [pypy-svn] r77076 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915080054.B4309282BEA@codespeak.net> Author: arigo Date: Wed Sep 15 10:00:52 2010 New Revision: 77076 Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (contents, props changed) pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py - copied unchanged from r77060, pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Removed: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/generation.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Log: Split the ArenaCollection into its own file. Finish a first draft of the collector itself, tested using a simple debugging ArenaCollection. 
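For illustration only, and not part of the committed diff below: the log message refers to a "simple debugging ArenaCollection" used to test the collector draft, which does not appear in this section. A minimal sketch of what such a stand-in could look like follows. It matches the constructor signature that minimark.py passes (arena_size, page_size, small_request_threshold), but the malloc(size) interface and the raw_malloc()-based backing are assumptions of this sketch, not the committed code.

    from pypy.rpython.lltypesystem import llmemory

    class SimpleArenaCollection(object):
        # Debugging stand-in: backs every request with its own
        # raw_malloc()ed block and never reuses or frees anything.
        # Slow, but enough to exercise the collector logic in tests.

        def __init__(self, arena_size, page_size, small_request_threshold):
            # arena_size and page_size are ignored by this sketch;
            # only the small-request threshold is actually checked.
            self.small_request_threshold = small_request_threshold
            self.all_blocks = []

        def malloc(self, size):
            # 'size' may be a symbolic llmemory size; get its byte count
            nsize = llmemory.raw_malloc_usage(size)
            assert 0 < nsize <= self.small_request_threshold
            block = llmemory.raw_malloc(size)
            llmemory.raw_memclear(block, size)
            self.all_blocks.append(block)   # kept forever; nothing is freed
            return block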
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/generation.py Wed Sep 15 10:00:52 2010 @@ -449,7 +449,7 @@ # for the JIT: a minimal description of the write_barrier() method # (the JIT assumes it is of the shape - # "if newvalue.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 10:00:52 2010 @@ -1,15 +1,35 @@ -from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi, llgroup +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import MovingGCBase from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import LONG_BIT from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.debug import ll_assert +from pypy.rlib.debug import ll_assert, debug_print WORD = LONG_BIT // 8 NULL = llmemory.NULL first_gcflag = 1 << (LONG_BIT//2) -GCFLAG_BIG = first_gcflag + +# The following flag is never set on young objects, i.e. the ones living +# in the nursery. It is initially set on all prebuilt and old objects, +# and gets cleared by the write_barrier() when we write in them a +# pointer to a young object. +GCFLAG_NO_YOUNG_PTRS = first_gcflag << 0 + +# The following flag is set on some prebuilt objects. The flag is set +# unless the object is already listed in 'prebuilt_root_objects'. +# When a pointer is written inside an object with GCFLAG_NO_HEAP_PTRS +# set, the write_barrier clears the flag and adds the object to +# 'prebuilt_root_objects'. +GCFLAG_NO_HEAP_PTRS = first_gcflag << 1 + +# The following flag is set on surviving objects during a major collection. +GCFLAG_VISITED = first_gcflag << 2 + +# Marker set to 'tid' during a minor collection when an object from +# the nursery was forwarded. +FORWARDED_MARKER = -1 # ____________________________________________________________ @@ -17,12 +37,28 @@ _alloc_flavor_ = "raw" inline_simple_malloc = True inline_simple_malloc_varsize = True - malloc_zero_filled = True - + needs_write_barrier = True + prebuilt_gc_objects_are_static_roots = False + malloc_zero_filled = True # XXX experiment with False + + # All objects start with a HDR, i.e. with a field 'tid' which contains + # a word. This word is divided in two halves: the lower half contains + # the typeid, and the upper half contains various flags, as defined + # by GCFLAG_xxx above. HDR = lltype.Struct('header', ('tid', lltype.Signed)) typeid_is_in_field = 'tid' #withhash_flag_is_in_field = 'tid', _GCFLAG_HASH_BASE * 0x2 + # During a minor collection, the objects in the nursery that are + # moved outside are changed in-place: their header is replaced with + # FORWARDED_MARKER, and the following word is set to the address of + # where the object was moved. This means that all objects in the + # nursery need to be at least 2 words long, but objects outside the + # nursery don't need to. 
+ minimal_size_in_nursery = (llmemory.sizeof(HDR) + + llmemory.sizeof(llmemory.Address)) + + TRANSLATION_PARAMS = { # The size of the nursery. -1 means "auto", which means that it # will look it up in the env var PYPY_GENERATIONGC_NURSERY and @@ -45,259 +81,456 @@ def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, nursery_size=32*WORD, page_size=16*WORD, - arena_size=48*WORD, - small_request_threshold=5*WORD): + arena_size=64*WORD, + small_request_threshold=5*WORD, + ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) self.nursery_size = nursery_size - self.page_size = page_size - self.arena_size = arena_size self.small_request_threshold = small_request_threshold + # + # The ArenaCollection() handles the nonmovable objects allocation. + if ArenaCollectionClass is None: + ArenaCollectionClass = ArenaCollection + self.ac = ArenaCollectionClass(arena_size, page_size, + small_request_threshold) + # + # A list of all raw_malloced objects (the objects too large) + self.rawmalloced_objects = self.AddressStack() + # + # Used by minor collection: a list of non-young objects that + # (may) contain a pointer to a young object. Populated by + # the write barrier. + self.old_objects_pointing_to_young = self.AddressStack() + # + # A list of all prebuilt GC objects that contain pointers to the heap + self.prebuilt_root_objects = self.AddressStack() + # + self._init_writebarrier_logic() + def setup(self): - pass + """Called at run-time to initialize the GC.""" + # + assert self.nursery_size > 0, "XXX" + # + # the start of the nursery: we actually allocate a tiny bit more for + # the nursery than really needed, to simplify pointer arithmetic + # in malloc_fixedsize_clear(). + extra = self.small_request_threshold + self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) + if not self.nursery: + raise MemoryError("cannot allocate nursery") + # the current position in the nursery: + self.nursery_next = self.nursery + # the end of the nursery: + self.nursery_top = self.nursery + self.nursery_size + + + def malloc_fixedsize_clear(self, typeid, size, can_collect=True, + needs_finalizer=False, contains_weakptr=False): + ll_assert(can_collect, "!can_collect") + assert not needs_finalizer # XXX + assert not contains_weakptr # XXX + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + # + # If totalsize is greater than small_request_threshold, ask for + # a rawmalloc. The following check should be constant-folded. + if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + return self.external_malloc(typeid, size) + # + # If totalsize is smaller than minimal_size_in_nursery, round it up. + # The following check should also be constant-folded. + if (llmemory.raw_malloc_usage(totalsize) < + llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): + totalsize = self.minimal_size_in_nursery + # + # Get the memory from the nursery. If there is not enough space + # there, do a collect first. + result = self.nursery_next + self.nursery_next = result + totalsize + if self.nursery_next > self.nursery_top: + result = self.collect_and_reserve(totalsize) + # + # Build the object. 
+ llarena.arena_reserve(result, totalsize) + self.init_gc_object(result, typeid, flags=0) + return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + + + def collect(self, gen=1): + """Do a minor (gen=0) or major (gen>0) collection.""" + self.minor_collection() + if gen > 0: + self.major_collection() + + def collect_and_reserve(self, totalsize): + """To call when nursery_next overflows nursery_top. + Do a minor collection, and possibly also a major collection, + and finally reserve 'totalsize' bytes at the start of the + now-empty nursery. + """ + self.collect(0) # XXX + self.nursery_next = self.nursery + totalsize + return self.nursery + collect_and_reserve._dont_inline_ = True + + + def external_malloc(self, typeid, size): + """Allocate a large object using raw_malloc().""" + # + # First check if we are called because we wanted to allocate + # an object that is larger than self.small_request_threshold. + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + xxxxxxx + # + # Yes: just use a raw_malloc() to get the object. + result = llmemory.raw_malloc(totalsize) + if not result: + raise MemoryError() + raw_memclear(result, totalsize) + self.rawmalloced_objects.append(result + size_gc_header) + return result -# ____________________________________________________________ -# Terminology: the memory is subdivided into "pages". -# A page contains a number of allocated objects, called "blocks". + # ---------- + # Simple helpers + + def get_type_id(self, obj): + tid = self.header(obj).tid + return llop.extract_ushort(llgroup.HALFWORD, tid) + + def combine(self, typeid16, flags): + return llop.combine_ushort(lltype.Signed, typeid16, flags) + + def init_gc_object(self, addr, typeid16, flags=0): + #print "init_gc_object(%r, 0x%x)" % (addr, flags) + # The default 'flags' is zero. The flags GCFLAG_NO_xxx_PTRS + # have been chosen to allow 'flags' to be zero in the common + # case (hence the 'NO' in their name). + hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) + hdr.tid = self.combine(typeid16, flags) + + def init_gc_object_immortal(self, addr, typeid16, flags=0): + # For prebuilt GC objects, the flags must contain + # GCFLAG_NO_xxx_PTRS, at least initially. + flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_NO_YOUNG_PTRS + self.init_gc_object(addr, typeid16, flags) + + def is_in_nursery(self, addr): + ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, + "odd-valued (i.e. 
tagged) pointer unexpected here") + return self.nursery <= addr < self.nursery_top + + def is_forwarded_marker(self, tid): + return isinstance(tid, int) and tid == FORWARDED_MARKER + + def debug_check_object(self, obj): + # after a minor or major collection, no object should be in the nursery + ll_assert(not self.is_in_nursery(obj), + "object in nursery after collection") + # similarily, all objects should have this flag: + ll_assert(self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS, + "missing GCFLAG_NO_YOUNG_PTRS") + # the GCFLAG_VISITED should not be set between collections + ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, + "unexpected GCFLAG_VISITED") + + # ---------- + # Write barrier + + # for the JIT: a minimal description of the write_barrier() method + # (the JIT assumes it is of the shape + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS + + def write_barrier(self, newvalue, addr_struct): + if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: + self.remember_young_pointer(addr_struct, newvalue) + + def _init_writebarrier_logic(self): + # The purpose of attaching remember_young_pointer to the instance + # instead of keeping it as a regular method is to help the JIT call it. + # Additionally, it makes the code in write_barrier() marginally smaller + # (which is important because it is inlined *everywhere*). + # For x86, there is also an extra requirement: when the JIT calls + # remember_young_pointer(), it assumes that it will not touch the SSE + # registers, so it does not save and restore them (that's a *hack*!). + def remember_young_pointer(addr_struct, addr): + # 'addr_struct' is the address of the object in which we write; + # 'addr' is the address that we write in 'addr_struct'. + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + # if we have tagged pointers around, we first need to check whether + # we have valid pointer here, otherwise we can do it after the + # is_in_nursery check + if (self.config.taggedpointers and + not self.is_valid_gc_object(addr)): + return + # + # Core logic: if the 'addr' is in the nursery, then we need + # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object + # to the list 'old_objects_pointing_to_young'. We know that + # 'addr_struct' cannot be in the nursery, because nursery objects + # never have the flag GCFLAG_NO_YOUNG_PTRS to start with. + objhdr = self.header(addr_struct) + if self.is_in_nursery(addr): + self.old_objects_pointing_to_young.append(addr_struct) + objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS + elif (not self.config.taggedpointers and + not self.is_valid_gc_object(addr)): + return + # + # Second part: if 'addr_struct' is actually a prebuilt GC + # object and it's the first time we see a write to it, we + # add it to the list 'prebuilt_root_objects'. Note that we + # do it even in the (rare?) case of 'addr' being another + # prebuilt object, to simplify code. + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_struct) + + remember_young_pointer._dont_inline_ = True + self.remember_young_pointer = remember_young_pointer + + + # ---------- + # Nursery collection + + def minor_collection(self): + """Perform a minor collection: find the objects from the nursery + that remain alive and move them out.""" + # + #print "nursery_collect()" + # + # First, find the roots that point to nursery objects. These + # nursery objects are copied out of the nursery. 
Note that + # references to further nursery objects are not modified by + # this step; only objects directly referenced by roots are + # copied out. They are also added to the list + # 'old_objects_pointing_to_young'. + self.collect_roots_in_nursery() + # + # Now trace objects from 'old_objects_pointing_to_young'. + # All nursery objects they reference are copied out of the + # nursery, and again added to 'old_objects_pointing_to_young'. + # We proceed until 'old_objects_pointing_to_young' is empty. + self.collect_oldrefs_to_nursery() + # + # Now all live nursery objects should be out, and the rest dies. + # Fill the whole nursery with zero and reset the current nursery + # pointer. + llarena.arena_reset(self.nursery, self.nursery_size, 2) + self.nursery_next = self.nursery + # + self.debug_check_consistency() # XXX expensive! + + + def collect_roots_in_nursery(self): + # we don't need to trace prebuilt GcStructs during a minor collect: + # if a prebuilt GcStruct contains a pointer to a young object, + # then the write_barrier must have ensured that the prebuilt + # GcStruct is in the list self.old_objects_pointing_to_young. + self.root_walker.walk_roots( + MiniMarkGC._trace_drag_out, # stack roots + MiniMarkGC._trace_drag_out, # static in prebuilt non-gc + None) # static in prebuilt gc + + def collect_oldrefs_to_nursery(self): + # Follow the old_objects_pointing_to_young list and move the + # young objects they point to out of the nursery. + oldlist = self.old_objects_pointing_to_young + while oldlist.non_empty(): + obj = oldlist.pop() + # + # Add the flag GCFLAG_NO_YOUNG_PTRS. All live objects should have + # this flag after a nursery collection. + self.header(obj).tid |= GCFLAG_NO_YOUNG_PTRS + # + # Trace the 'obj' to replace pointers to nursery with pointers + # outside the nursery, possibly forcing nursery objects out + # and adding them to 'old_objects_pointing_to_young' as well. + self.trace_and_drag_out_of_nursery(obj) + + def trace_and_drag_out_of_nursery(self, obj): + """obj must not be in the nursery. This copies all the + young objects it references out of the nursery. + """ + self.trace(obj, self._trace_drag_out, None) + + + def _trace_drag_out(self, root, ignored=None): + obj = root.address[0] + # + # If 'obj' is not in the nursery, nothing to change. + if not self.is_in_nursery(obj): + return + #size_gc_header = self.gcheaderbuilder.size_gc_header + #print '\ttrace_drag_out', llarena.getfakearenaaddress(obj - size_gc_header), + # + # If 'obj' was already forwarded, change it to its forwarding address. + if self.is_forwarded_marker(self.header(obj).tid): + obj = llarena.getfakearenaaddress(obj) + root.address[0] = obj.address[0] + #print '(already forwarded)' + return + # + # First visit to 'obj': we must move it out of the nursery. + # Allocate a new nonmovable location for it. + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + totalsize = size_gc_header + size + newhdr = self.ac.malloc(totalsize) + newobj = newhdr + size_gc_header + # + # Copy it. Note that references to other objects in the + # nursery are kept unchanged in this step. + llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize) + # + # Set the old object's tid to FORWARDED_MARKER and replace + # the old object's content with the target address. + # A bit of no-ops to convince llarena that we are changing + # the layout, in non-translated versions. 
+ llarena.arena_reset(obj - size_gc_header, totalsize, 0) + llarena.arena_reserve(obj - size_gc_header, llmemory.sizeof(self.HDR)) + llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address)) + self.header(obj).tid = FORWARDED_MARKER + obj = llarena.getfakearenaaddress(obj) + obj.address[0] = newobj + # + # Change the original pointer to this object. + #print + #print '\t\t\t->', llarena.getfakearenaaddress(newobj - size_gc_header) + root.address[0] = newobj + # + # Add the newobj to the list 'old_objects_pointing_to_young', + # because it can contain further pointers to other young objects. + # We will fix such references to point to the copy of the young + # objects when we walk 'old_objects_pointing_to_young'. + self.old_objects_pointing_to_young.append(newobj) + + + # ---------- + # Full collection + + def major_collection(self): + """Do a major collection. Only for when the nursery is empty.""" + # + # Debugging checks + ll_assert(self.nursery_next == self.nursery, + "nursery not empty in major_collection()") + self.debug_check_consistency() + # + # Note that a major collection is non-moving. The goal is only to + # find and free some of the objects allocated by the ArenaCollection. + # We first visit all objects and toggle the flag GCFLAG_VISITED on + # them, starting from the roots. + self.collect_roots() + self.visit_all_objects() + # + # Ask the ArenaCollection to visit all objects. Free the ones + # that have not been visited above, and reset GCFLAG_VISITED on + # the others. + self.ac.mass_free(self._free_if_unvisited) + # + # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. + self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) + # + self.debug_check_consistency() + + + def _free_if_unvisited(self, hdr): + size_gc_header = self.gcheaderbuilder.size_gc_header + obj = hdr + size_gc_header + if self.header(obj).tid & GCFLAG_VISITED: + self.header(obj).tid &= ~GCFLAG_VISITED + return False # survives + else: + return True # dies + + def _reset_gcflag_visited(self, obj, ignored=None): + self.header(obj).tid &= ~GCFLAG_VISITED + + + def collect_roots(self): + # Collect all roots. Starts from all the objects + # from 'prebuilt_root_objects'. + self.objects_to_trace = self.AddressStack() + self.prebuilt_root_objects.foreach(self._collect_obj, None) + # + # Add the roots from the other sources. + self.root_walker.walk_roots( + MiniMarkGC._collect_ref, # stack roots + MiniMarkGC._collect_ref, # static in prebuilt non-gc structures + None) # we don't need the static in all prebuilt gc objects + + def _collect_obj(self, obj, ignored=None): + self.objects_to_trace.append(obj) + + def _collect_ref(self, root, ignored=None): + self.objects_to_trace.append(root.address[0]) + + def visit_all_objects(self): + pending = self.objects_to_trace + while pending.non_empty(): + obj = pending.pop() + self.visit(obj) + pending.delete() + + def visit(self, obj): + # + # 'obj' is a live object. Check GCFLAG_VISITED to know if we + # have already seen it before. + # + # Moreover, we can ignore prebuilt objects with GCFLAG_NO_HEAP_PTRS. + # If they have this flag set, then they cannot point to heap + # objects, so ignoring them is fine. If they don't have this + # flag set, then the object should be in 'prebuilt_root_objects', + # and the GCFLAG_VISITED will be reset at the end of the + # collection. + hdr = self.header(obj) + if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): + return + # + # It's the first time. We set the flag. 
+ hdr.tid |= GCFLAG_VISITED + # + # Trace the content of the object and put all objects it references + # into the 'objects_to_trace' list. + self.trace(obj, self._collect_ref, None) -# The actual allocation occurs in whole arenas, which are subdivided -# into pages. We don't keep track of the arenas. A page can be: -# -# - uninitialized: never touched so far. -# -# - allocated: contains some objects (all of the same size). Starts with a -# PAGE_HEADER. The page is on the chained list of pages that still have -# room for objects of that size, unless it is completely full. -# -# - free: used to be partially full, and is now free again. The page is -# on the chained list of free pages. - -# Similarily, each allocated page contains blocks of a given size, which can -# be either uninitialized, allocated or free. - -PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) -PAGE_HEADER = lltype.Struct('PageHeader', - # -- The following two pointers make a chained list of pages with the same - # size class. Warning, 'prevpage' contains random garbage for the first - # entry in the list. - ('nextpage', PAGE_PTR), - ('prevpage', PAGE_PTR), - # -- The number of free blocks, and the number of uninitialized blocks. - # The number of allocated blocks is the rest. - ('nuninitialized', lltype.Signed), - ('nfree', lltype.Signed), - # -- The chained list of free blocks. If there are none, points to the - # first uninitialized block. - ('freeblock', llmemory.Address), - ) -PAGE_PTR.TO.become(PAGE_HEADER) -PAGE_NULL = lltype.nullptr(PAGE_HEADER) # ____________________________________________________________ +# For testing, a simple implementation of ArenaCollection. +# This version could be used together with obmalloc.c, but +# it requires an extra word per object in the 'all_objects' +# list. -class ArenaCollection(object): - _alloc_flavor_ = "raw" +class SimpleArenaCollection(object): def __init__(self, arena_size, page_size, small_request_threshold): - self.arena_size = arena_size + self.arena_size = arena_size # ignored self.page_size = page_size self.small_request_threshold = small_request_threshold - # - # 'pageaddr_for_size': for each size N between WORD and - # small_request_threshold (included), contains either NULL or - # a pointer to a page that has room for at least one more - # allocation of the given size. - length = small_request_threshold / WORD + 1 - self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, - flavor='raw', zero=True) - self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), - length, flavor='raw') - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - for i in range(1, length): - self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i) - # - self.uninitialized_pages = PAGE_NULL - self.num_uninitialized_pages = 0 - self.free_pages = PAGE_NULL - + self.all_objects = [] def malloc(self, size): - """Allocate a block from a page in an arena.""" - ll_assert(size > 0, "malloc: size is null or negative") - ll_assert(size <= self.small_request_threshold, "malloc: size too big") - ll_assert((size & (WORD-1)) == 0, "malloc: size is not aligned") - # - # Get the page to use from the size - size_class = size / WORD - page = self.page_for_size[size_class] - if page == PAGE_NULL: - page = self.allocate_new_page(size_class) - # - # The result is simply 'page.freeblock' - result = page.freeblock - if page.nfree > 0: - # - # The 'result' was part of the chained list; read the next. 
- page.nfree -= 1 - page.freeblock = result.address[0] - llarena.arena_reset(result, - llmemory.sizeof(llmemory.Address), - False) - # - else: - # The 'result' is part of the uninitialized blocks. - ll_assert(page.nuninitialized > 0, - "fully allocated page found in the page_for_size list") - page.freeblock = result + size - page.nuninitialized -= 1 - if page.nuninitialized == 0: - # - # This was the last free block, so unlink the page from the - # chained list. - self.page_for_size[size_class] = page.nextpage - # - llarena.arena_reserve(result, _dummy_size(size), False) - return result - - - def allocate_new_page(self, size_class): - """Allocate and return a new page for the given size_class.""" + nsize = llmemory.raw_malloc_usage(size) + ll_assert(nsize > 0, "malloc: size is null or negative") + ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") + ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") # - if self.free_pages != PAGE_NULL: - # - # Get the page from the chained list 'free_pages'. - page = self.free_pages - self.free_pages = page.address[0] - llarena.arena_reset(self.free_pages, - llmemory.sizeof(llmemory.Address), - False) - else: - # Get the next free page from the uninitialized pages. - if self.num_uninitialized_pages == 0: - self.allocate_new_arena() # Out of memory. Get a new arena. - page = self.uninitialized_pages - self.uninitialized_pages += self.page_size - self.num_uninitialized_pages -= 1 - # - # Initialize the fields of the resulting page - llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) - result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) - # - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - result.nuninitialized = self.nblocks_for_size[size_class] - result.nfree = 0 - result.freeblock = page + hdrsize - result.nextpage = PAGE_NULL - ll_assert(self.page_for_size[size_class] == PAGE_NULL, - "allocate_new_page() called but a page is already waiting") - self.page_for_size[size_class] = result + result = llmemory.raw_malloc(size) + self.all_objects.append(result) return result - - def allocate_new_arena(self): - ll_assert(self.num_uninitialized_pages == 0, - "some uninitialized pages are already waiting") - # - # 'arena_base' points to the start of malloced memory; it might not - # be a page-aligned address - arena_base = llarena.arena_malloc(self.arena_size, False) - if not arena_base: - raise MemoryError("couldn't allocate the next arena") - arena_end = arena_base + self.arena_size - # - # 'firstpage' points to the first unused page - firstpage = start_of_page(arena_base + self.page_size - 1, - self.page_size) - # 'npages' is the number of full pages just allocated - npages = (arena_end - firstpage) // self.page_size - # - # add these pages to the list - self.uninitialized_pages = firstpage - self.num_uninitialized_pages = npages - # - # increase a bit arena_size for the next time - self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1) - self.arena_size = (self.arena_size // self.page_size) * self.page_size - - - def free(self, obj, size): - """Free a previously malloc'ed block.""" - ll_assert(size > 0, "free: size is null or negative") - ll_assert(size <= self.small_request_threshold, "free: size too big") - ll_assert((size & (WORD-1)) == 0, "free: size is not aligned") - # - llarena.arena_reset(obj, _dummy_size(size), False) - pageaddr = start_of_page(obj, self.page_size) - if not we_are_translated(): - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - assert obj - 
pageaddr >= hdrsize - assert (obj - pageaddr - hdrsize) % size == 0 - page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) - size_class = size / WORD - # - # Increment the number of known free objects - nfree = page.nfree + 1 - if nfree < self.nblocks_for_size[size_class]: - # - # Not all objects in this page are freed yet. - # Add the free block to the chained list. - page.nfree = nfree - llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address), - False) - obj.address[0] = page.freeblock - page.freeblock = obj - # - # If the page was full, then it now has space and should be - # linked back in the page_for_size[] linked list. - if nfree == 1: - page.nextpage = self.page_for_size[size_class] - if page.nextpage != PAGE_NULL: - page.nextpage.prevpage = page - self.page_for_size[size_class] = page - # - else: - # The page becomes completely free. Remove it from - # the page_for_size[] linked list. - if page == self.page_for_size[size_class]: - self.page_for_size[size_class] = page.nextpage + def mass_free(self, ok_to_free_func): + objs = self.all_objects + self.all_objects = [] + for rawobj in objs: + if ok_to_free_func(rawobj): + llmemory.raw_free(rawobj) else: - prev = page.prevpage - next = page.nextpage - prev.nextpage = next - next.prevpage = prev - # - # Free the page, putting it back in the chained list of the arena - # where it belongs - xxx#... - - -# ____________________________________________________________ -# Helpers to go from a pointer to the start of its page - -def start_of_page(addr, page_size): - """Return the address of the start of the page that contains 'addr'.""" - if we_are_translated(): - xxx - else: - return _start_of_page_untranslated(addr, page_size) - -def _start_of_page_untranslated(addr, page_size): - assert isinstance(addr, llarena.fakearenaaddress) - shift = 4 # for testing, we assume that the whole arena is not - # on a page boundary - ofs = ((addr.offset - shift) // page_size) * page_size + shift - return llarena.fakearenaaddress(addr.arena, ofs) - -def _dummy_size(size): - if we_are_translated(): - return size - if isinstance(size, int): - size = llmemory.sizeof(lltype.Char) * size - return size - -# ____________________________________________________________ - -def nursery_size_from_env(): - return read_from_env('PYPY_GENERATIONGC_NURSERY') + self.all_objects.append(rawobj) Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- (empty file) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 10:00:52 2010 @@ -0,0 +1,241 @@ + +# Terminology: the memory is subdivided into "pages". +# A page contains a number of allocated objects, called "blocks". + +# The actual allocation occurs in whole arenas, which are subdivided +# into pages. We don't keep track of the arenas. A page can be: +# +# - uninitialized: never touched so far. +# +# - allocated: contains some objects (all of the same size). Starts with a +# PAGE_HEADER. The page is on the chained list of pages that still have +# room for objects of that size, unless it is completely full. +# +# - free: used to be partially full, and is now free again. The page is +# on the chained list of free pages. + +# Similarily, each allocated page contains blocks of a given size, which can +# be either uninitialized, allocated or free. 
+ +PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) +PAGE_HEADER = lltype.Struct('PageHeader', + # -- The following two pointers make a chained list of pages with the same + # size class. Warning, 'prevpage' contains random garbage for the first + # entry in the list. + ('nextpage', PAGE_PTR), + ('prevpage', PAGE_PTR), + # -- The number of free blocks, and the number of uninitialized blocks. + # The number of allocated blocks is the rest. + ('nuninitialized', lltype.Signed), + ('nfree', lltype.Signed), + # -- The chained list of free blocks. If there are none, points to the + # first uninitialized block. + ('freeblock', llmemory.Address), + ) +PAGE_PTR.TO.become(PAGE_HEADER) +PAGE_NULL = lltype.nullptr(PAGE_HEADER) + +# ---------- + + +class ArenaCollection(object): + _alloc_flavor_ = "raw" + + def __init__(self, arena_size, page_size, small_request_threshold): + self.arena_size = arena_size + self.page_size = page_size + self.small_request_threshold = small_request_threshold + # + # 'pageaddr_for_size': for each size N between WORD and + # small_request_threshold (included), contains either NULL or + # a pointer to a page that has room for at least one more + # allocation of the given size. + length = small_request_threshold / WORD + 1 + self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, + flavor='raw', zero=True) + self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), + length, flavor='raw') + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + for i in range(1, length): + self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i) + # + self.uninitialized_pages = PAGE_NULL + self.num_uninitialized_pages = 0 + self.free_pages = PAGE_NULL + + + def malloc(self, size): + """Allocate a block from a page in an arena.""" + nsize = llmemory.raw_malloc_usage(size) + ll_assert(nsize > 0, "malloc: size is null or negative") + ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") + ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") + # + # Get the page to use from the size + size_class = nsize / WORD + page = self.page_for_size[size_class] + if page == PAGE_NULL: + page = self.allocate_new_page(size_class) + # + # The result is simply 'page.freeblock' + result = page.freeblock + if page.nfree > 0: + # + # The 'result' was part of the chained list; read the next. + page.nfree -= 1 + page.freeblock = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + False) + # + else: + # The 'result' is part of the uninitialized blocks. + ll_assert(page.nuninitialized > 0, + "fully allocated page found in the page_for_size list") + page.freeblock = result + nsize + page.nuninitialized -= 1 + if page.nuninitialized == 0: + # + # This was the last free block, so unlink the page from the + # chained list. + self.page_for_size[size_class] = page.nextpage + # + llarena.arena_reserve(result, _dummy_size(size), False) + return result + + + def allocate_new_page(self, size_class): + """Allocate and return a new page for the given size_class.""" + # + if self.free_pages != PAGE_NULL: + # + # Get the page from the chained list 'free_pages'. + page = self.free_pages + self.free_pages = page.address[0] + llarena.arena_reset(self.free_pages, + llmemory.sizeof(llmemory.Address), + False) + else: + # Get the next free page from the uninitialized pages. + if self.num_uninitialized_pages == 0: + self.allocate_new_arena() # Out of memory. Get a new arena. 
+ page = self.uninitialized_pages + self.uninitialized_pages += self.page_size + self.num_uninitialized_pages -= 1 + # + # Initialize the fields of the resulting page + llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) + result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) + # + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + result.nuninitialized = self.nblocks_for_size[size_class] + result.nfree = 0 + result.freeblock = page + hdrsize + result.nextpage = PAGE_NULL + ll_assert(self.page_for_size[size_class] == PAGE_NULL, + "allocate_new_page() called but a page is already waiting") + self.page_for_size[size_class] = result + return result + + + def allocate_new_arena(self): + ll_assert(self.num_uninitialized_pages == 0, + "some uninitialized pages are already waiting") + # + # 'arena_base' points to the start of malloced memory; it might not + # be a page-aligned address + arena_base = llarena.arena_malloc(self.arena_size, False) + if not arena_base: + raise MemoryError("couldn't allocate the next arena") + arena_end = arena_base + self.arena_size + # + # 'firstpage' points to the first unused page + firstpage = start_of_page(arena_base + self.page_size - 1, + self.page_size) + # 'npages' is the number of full pages just allocated + npages = (arena_end - firstpage) // self.page_size + # + # add these pages to the list + self.uninitialized_pages = firstpage + self.num_uninitialized_pages = npages + # + # increase a bit arena_size for the next time + self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1) + self.arena_size = (self.arena_size // self.page_size) * self.page_size + allocate_new_arena._dont_inline_ = True + + + def free(self, obj, size): + """Free a previously malloc'ed block.""" + ll_assert(size > 0, "free: size is null or negative") + ll_assert(size <= self.small_request_threshold, "free: size too big") + ll_assert((size & (WORD-1)) == 0, "free: size is not aligned") + # + llarena.arena_reset(obj, _dummy_size(size), False) + pageaddr = start_of_page(obj, self.page_size) + if not we_are_translated(): + hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + assert obj - pageaddr >= hdrsize + assert (obj - pageaddr - hdrsize) % size == 0 + page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) + size_class = size / WORD + # + # Increment the number of known free objects + nfree = page.nfree + 1 + if nfree < self.nblocks_for_size[size_class]: + # + # Not all objects in this page are freed yet. + # Add the free block to the chained list. + page.nfree = nfree + llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address), + False) + obj.address[0] = page.freeblock + page.freeblock = obj + # + # If the page was full, then it now has space and should be + # linked back in the page_for_size[] linked list. + if nfree == 1: + page.nextpage = self.page_for_size[size_class] + if page.nextpage != PAGE_NULL: + page.nextpage.prevpage = page + self.page_for_size[size_class] = page + # + else: + # The page becomes completely free. Remove it from + # the page_for_size[] linked list. + if page == self.page_for_size[size_class]: + self.page_for_size[size_class] = page.nextpage + else: + prev = page.prevpage + next = page.nextpage + prev.nextpage = next + next.prevpage = prev + # + # Free the page, putting it back in the chained list of the arena + # where it belongs + xxx#... 
+ + +# ____________________________________________________________ +# Helpers to go from a pointer to the start of its page + +def start_of_page(addr, page_size): + """Return the address of the start of the page that contains 'addr'.""" + if we_are_translated(): + xxx + else: + return _start_of_page_untranslated(addr, page_size) + +def _start_of_page_untranslated(addr, page_size): + assert isinstance(addr, llarena.fakearenaaddress) + shift = 4 # for testing, we assume that the whole arena is not + # on a page boundary + ofs = ((addr.offset - shift) // page_size) * page_size + shift + return llarena.fakearenaaddress(addr.arena, ofs) + +def _dummy_size(size): + if we_are_translated(): + return size + if isinstance(size, int): + size = llmemory.sizeof(lltype.Char) * size + return size Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Wed Sep 15 10:00:52 2010 @@ -458,5 +458,11 @@ test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} -class TestMiniMarkGC(DirectGCTest): +class TestMiniMarkGCSimple(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + from pypy.rpython.memory.gc.minimark import SimpleArenaCollection + # test the GC itself, providing a simple class for ArenaCollection + GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} + +class TestMiniMarkGCFull(DirectGCTest): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass From afa at codespeak.net Wed Sep 15 10:13:20 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 15 Sep 2010 10:13:20 +0200 (CEST) Subject: [pypy-svn] r77077 - in pypy/branch/fast-forward/pypy: rpython/lltypesystem/module rpython/lltypesystem/module/test translator/c/src Message-ID: <20100915081320.4C069282BEA@codespeak.net> Author: afa Date: Wed Sep 15 10:13:18 2010 New Revision: 77077 Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/test/test_ll_math.py pypy/branch/fast-forward/pypy/translator/c/src/math.c Log: Implement math.isnan and math.isinf for Windows Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/ll_math.py Wed Sep 15 10:13:18 2010 @@ -18,16 +18,25 @@ separate_module_files=[srcdir.join('math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p'], + '_pypy_math_expm1', '_pypy_math_log1p', + '_pypy_math_isinf', '_pypy_math_isnan'], ) + math_prefix = '_pypy_math_' else: eci = ExternalCompilationInfo( libraries=['m']) + math_eci = eci + math_prefix = '' def llexternal(name, ARGS, RESULT): return rffi.llexternal(name, ARGS, RESULT, compilation_info=eci, sandboxsafe=True) +def math_llexternal(name, ARGS, RESULT): + return rffi.llexternal(math_prefix + name, ARGS, RESULT, + compilation_info=math_eci, + sandboxsafe=True) + if sys.platform == 'win32': underscore = '_' else: @@ -46,8 +55,8 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, 
rffi.DOUBLE], rffi.DOUBLE) -math_isinf = llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = llexternal('isnan', [rffi.DOUBLE], rffi.INT) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) +math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -296,9 +305,7 @@ def new_unary_math_function(name, can_overflow, c99): if sys.platform == 'win32' and c99: - win32name = '_pypy_math_%s' % (name,) - c_func = rffi.llexternal(win32name, [rffi.DOUBLE], rffi.DOUBLE, - compilation_info=math_eci, sandboxsafe=True) + c_func = math_llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) else: c_func = llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/test/test_ll_math.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/test/test_ll_math.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/module/test/test_ll_math.py Wed Sep 15 10:13:18 2010 @@ -6,7 +6,19 @@ class TestMath(MathTests): - pass + def test_isinf(self): + inf = 1e200 * 1e200 + nan = inf / inf + assert not ll_math.ll_math_isinf(0) + assert ll_math.ll_math_isinf(inf) + assert not ll_math.ll_math_isinf(nan) + + def test_isnan(self): + inf = 1e200 * 1e200 + nan = inf / inf + assert not ll_math.ll_math_isnan(0) + assert ll_math.ll_math_isnan(nan) + assert not ll_math.ll_math_isnan(inf) def make_test_case((fnname, args, expected), dict): # Modified: pypy/branch/fast-forward/pypy/translator/c/src/math.c ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/src/math.c (original) +++ pypy/branch/fast-forward/pypy/translator/c/src/math.c Wed Sep 15 10:13:18 2010 @@ -18,6 +18,18 @@ #undef PyPy_NAN +int +_pypy_math_isinf(double x) +{ + return PyPy_IS_INFINITY(x); +} + +int +_pypy_math_isnan(double x) +{ + return PyPy_IS_NAN(x); +} + /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ From arigo at codespeak.net Wed Sep 15 10:36:20 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 10:36:20 +0200 (CEST) Subject: [pypy-svn] r77078 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915083620.9E642282BEA@codespeak.net> Author: arigo Date: Wed Sep 15 10:36:19 2010 New Revision: 77078 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Fixing the tests, in-progress. 
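The test fixes below exercise ArenaCollection's page layout: each page of page_size bytes starts with a PAGE_HEADER and is then carved into fixed-size blocks of a single size class, each a multiple of WORD bytes. A small, hypothetical sketch of that arithmetic follows; the WORD and header sizes here are assumptions chosen for illustration, not the values computed by the real code.

WORD = 8                  # assuming a 64-bit build; LONG_BIT // 8 in the real code
HDRSIZE = 4 * WORD        # stand-in for raw_malloc_usage(sizeof(PAGE_HEADER))

def nblocks_for_size(page_size, size_class):
    # how many blocks of size_class words fit in one page after the header
    return (page_size - HDRSIZE) // (WORD * size_class)

page_size = HDRSIZE + 7 * WORD                # layout used by several tests below
assert nblocks_for_size(page_size, 2) == 3    # three 2-word blocks per page
assert nblocks_for_size(page_size, 3) == 2    # two 3-word blocks per page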
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 10:36:19 2010 @@ -1,13 +1,11 @@ -from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi, llgroup +from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import MovingGCBase from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import LONG_BIT -from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import ll_assert, debug_print WORD = LONG_BIT // 8 -NULL = llmemory.NULL first_gcflag = 1 << (LONG_BIT//2) @@ -90,6 +88,7 @@ # # The ArenaCollection() handles the nonmovable objects allocation. if ArenaCollectionClass is None: + from pypy.rpython.memory.gc.minimarkpage import ArenaCollection ArenaCollectionClass = ArenaCollection self.ac = ArenaCollectionClass(arena_size, page_size, small_request_threshold) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 10:36:19 2010 @@ -1,3 +1,11 @@ +from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.debug import ll_assert + +WORD = LONG_BIT // 8 +NULL = llmemory.NULL + # Terminology: the memory is subdivided into "pages". # A page contains a number of allocated objects, called "blocks". @@ -61,7 +69,7 @@ # self.uninitialized_pages = PAGE_NULL self.num_uninitialized_pages = 0 - self.free_pages = PAGE_NULL + self.free_pages = NULL def malloc(self, size): @@ -107,14 +115,12 @@ def allocate_new_page(self, size_class): """Allocate and return a new page for the given size_class.""" # - if self.free_pages != PAGE_NULL: + if self.free_pages != NULL: # # Get the page from the chained list 'free_pages'. page = self.free_pages self.free_pages = page.address[0] - llarena.arena_reset(self.free_pages, - llmemory.sizeof(llmemory.Address), - False) + llarena.arena_reset(page, llmemory.sizeof(llmemory.Address), 0) else: # Get the next free page from the uninitialized pages. 
if self.num_uninitialized_pages == 0: Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 10:36:19 2010 @@ -1,22 +1,23 @@ import py -from pypy.rpython.memory.gc import minimark -from pypy.rpython.memory.gc.minimark import PAGE_NULL, PAGE_HEADER, PAGE_PTR -from pypy.rpython.memory.gc.minimark import WORD +from pypy.rpython.memory.gc.minimarkpage import ArenaCollection +from pypy.rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR +from pypy.rpython.memory.gc.minimarkpage import PAGE_NULL, WORD from pypy.rpython.lltypesystem import lltype, llmemory, llarena from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr +NULL = llmemory.NULL SHIFT = 4 hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) def test_allocate_arena(): - ac = minimark.ArenaCollection(SHIFT + 8*20, 8, 1) + ac = ArenaCollection(SHIFT + 8*20, 8, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 ac.uninitialized_pages + 8*20 # does not raise py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1") # - ac = minimark.ArenaCollection(SHIFT + 8*20 + 7, 8, 1) + ac = ArenaCollection(SHIFT + 8*20 + 7, 8, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 ac.uninitialized_pages + 8*20 + 7 # does not raise @@ -35,7 +36,7 @@ assert llmemory.cast_ptr_to_adr(page) == page1 assert page.nextpage == PAGE_NULL # - ac = minimark.ArenaCollection(arenasize, pagesize, 99) + ac = ArenaCollection(arenasize, pagesize, 99) assert ac.num_uninitialized_pages == 0 # page = ac.allocate_new_page(5) @@ -56,10 +57,11 @@ assert ac.page_for_size[4] == page -def arena_collection_for_test(pagesize, *pagelayouts): - nb_pages = len(pagelayouts[0]) +def arena_collection_for_test(pagesize, pagelayout): + assert " " not in pagelayout.rstrip(" ") + nb_pages = len(pagelayout) arenasize = pagesize * (nb_pages + 1) - 1 - ac = minimark.ArenaCollection(arenasize, pagesize, 9*WORD) + ac = ArenaCollection(arenasize, pagesize, 9*WORD) # def link(pageaddr, size_class, size_block, nblocks, nusedblocks): llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER)) @@ -72,49 +74,34 @@ if page.nextpage: page.nextpage.prevpage = page # - alist = [] - for layout in pagelayouts: - assert len(layout) == nb_pages - assert " " not in layout.rstrip(" ") - a = minimark.allocate_arena(arenasize, pagesize) - alist.append(a) - assert lltype.typeOf(a.freepage) == llmemory.Address - startpageaddr = a.freepage - a.freepage += pagesize * min((layout + " ").index(" "), - (layout + ".").index(".")) - a.nfreepages = layout.count(" ") + layout.count(".") - a.nuninitializedpages = layout.count(" ") - # - pageaddr = startpageaddr - for i, c in enumerate(layout): - if '1' <= c <= '9': # a partially used page (1 block free) - size_class = int(c) - size_block = WORD * size_class - nblocks = (pagesize - hdrsize) // size_block - link(pageaddr, size_class, size_block, nblocks, nblocks-1) - elif c == '.': # a free, but initialized, page - next_free_num = min((layout + " ").find(" ", i+1), - (layout + ".").find(".", i+1)) - addr = startpageaddr + pagesize * next_free_num - llarena.arena_reserve(pageaddr, - llmemory.sizeof(llmemory.Address)) - pageaddr.address[0] = addr - elif c == '#': # a random full page, not in any linked list - pass - elif c == ' ': # the 
tail is uninitialized free pages - break - pageaddr += pagesize - # - assert alist == sorted(alist, key=lambda a: a.nfreepages) - # - ac.arenas_start = alist[0] - ac.arenas_end = alist[-1] - for a, b in zip(alist[:-1], alist[1:]): - a.nextarena = b - b.prevarena = a + ac.allocate_new_arena() + num_initialized_pages = len(pagelayout.rstrip(" ")) + ac._startpageaddr = ac.uninitialized_pages + ac.uninitialized_pages += pagesize * num_initialized_pages + ac.num_uninitialized_pages -= num_initialized_pages + # + for i in reversed(range(num_initialized_pages)): + c = pagelayout[i] + if '1' <= c <= '9': # a partially used page (1 block free) + size_class = int(c) + size_block = WORD * size_class + nblocks = (pagesize - hdrsize) // size_block + link(pagenum(ac, i), size_class, size_block, + nblocks, nblocks-1) + elif c == '.': # a free, but initialized, page + pageaddr = pagenum(ac, i) + llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) + pageaddr.address[0] = ac.free_pages + ac.free_pages = pageaddr + elif c == '#': # a random full page, not in any linked list + pass + # return ac +def pagenum(ac, i): + return ac._startpageaddr + ac.page_size * i + def getarena(ac, num, total=None): if total is not None: a = getarena(ac, total-1) @@ -128,29 +115,29 @@ a = a.nextarena return a -def checkpage(ac, page, arena, nb_page): - pageaddr = llmemory.cast_ptr_to_adr(page) - assert pageaddr == arena.arena_base + SHIFT + nb_page * ac.page_size +def checkpage(ac, page, expected_position): + assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position) def test_simple_arena_collection(): pagesize = hdrsize + 16 ac = arena_collection_for_test(pagesize, "##....# ") - #assert ac.... - assert ac.arenas_end.nfreepages == 4 # - a0 = getarena(ac, 0, total=2) - a1 = getarena(ac, 1, total=2) - page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2) - page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3) - assert getarena(ac, 0, total=2) == a0 - page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4) - assert getarena(ac, 0, total=1) == a1 - page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0) - page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2) - page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3) - page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4) - assert ac.arenas_start == ac.arenas_end == ARENA_NULL + assert ac.free_pages == pagenum(ac, 2) + page = ac.allocate_new_page(1); checkpage(ac, page, 2) + assert ac.free_pages == pagenum(ac, 3) + page = ac.allocate_new_page(2); checkpage(ac, page, 3) + assert ac.free_pages == pagenum(ac, 4) + page = ac.allocate_new_page(3); checkpage(ac, page, 4) + assert ac.free_pages == pagenum(ac, 5) + page = ac.allocate_new_page(4); checkpage(ac, page, 5) + assert ac.free_pages == NULL and ac.num_uninitialized_pages == 3 + page = ac.allocate_new_page(5); checkpage(ac, page, 7) + assert ac.free_pages == NULL and ac.num_uninitialized_pages == 2 + page = ac.allocate_new_page(6); checkpage(ac, page, 8) + assert ac.free_pages == NULL and ac.num_uninitialized_pages == 1 + page = ac.allocate_new_page(7); checkpage(ac, page, 9) + assert ac.free_pages == NULL and ac.num_uninitialized_pages == 0 def ckob(ac, arena, num_page, pos_obj, obj): From arigo at codespeak.net Wed Sep 15 10:46:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 10:46:13 +0200 (CEST) Subject: [pypy-svn] r77079 - pypy/branch/gen2-gc/pypy/rpython/memory/gc/test Message-ID: <20100915084613.CB6FA282BEA@codespeak.net> Author: arigo 
Date: Wed Sep 15 10:46:12 2010 New Revision: 77079 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Finish to fix the tests. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 10:46:12 2010 @@ -66,8 +66,8 @@ def link(pageaddr, size_class, size_block, nblocks, nusedblocks): llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER)) page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) - page.nfree = nblocks - nusedblocks - page.nuninitialized = page.nfree + page.nfree = 0 + page.nuninitialized = nblocks - nusedblocks page.freeblock = pageaddr + hdrsize + nusedblocks * size_block page.nextpage = ac.page_for_size[size_class] ac.page_for_size[size_class] = page @@ -96,6 +96,7 @@ elif c == '#': # a random full page, not in any linked list pass # + ac.allocate_new_arena = lambda: should_not_allocate_new_arenas return ac @@ -140,57 +141,49 @@ assert ac.free_pages == NULL and ac.num_uninitialized_pages == 0 -def ckob(ac, arena, num_page, pos_obj, obj): - pageaddr = arena.arena_base + SHIFT + num_page * ac.page_size +def chkob(ac, num_page, pos_obj, obj): + pageaddr = pagenum(ac, num_page) assert obj == pageaddr + hdrsize + pos_obj def test_malloc_common_case(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") - a0 = getarena(ac, 0, total=1) - obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 0*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 2*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 0*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 2*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 4*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 3, 2*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 3, 4*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 6, 0*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 6, 2*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 6, 4*WORD, obj) def test_malloc_mixed_sizes(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") - a0 = getarena(ac, 0, total=1) - obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) - obj = ac.malloc(3*WORD); ckob(ac, a0, 2, 3*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) - obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 3 - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 2 - obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 3*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj) - obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 3 - obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj) - obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 3*WORD, 
obj) + obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj) + obj = ac.malloc(3*WORD); chkob(ac, 2, 3*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj) + obj = ac.malloc(3*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 3 + obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj) # 4th page -> size 2 + obj = ac.malloc(3*WORD); chkob(ac, 3, 3*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj) + obj = ac.malloc(3*WORD); chkob(ac, 6, 0*WORD, obj) # 6th page -> size 3 + obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj) + obj = ac.malloc(3*WORD); chkob(ac, 6, 3*WORD, obj) def test_malloc_new_arena(): pagesize = hdrsize + 7*WORD - ac = arena_collection_for_test(pagesize, "#23..2 ") - a0 = getarena(ac, 0, total=1) - obj = ac.malloc(5*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 5 - obj = ac.malloc(4*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 4 - obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 1 - assert ac.arenas_start == ac.arenas_end == ARENA_NULL # no more free page - obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 1*WORD, obj) - obj = ac.malloc(5*WORD) - a1 = getarena(ac, 0, total=1) - pass; ckob(ac, a1, 0, 0*WORD, obj) # a1/0 -> size 5 - obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 2*WORD, obj) - obj = ac.malloc(5*WORD); ckob(ac, a1, 1, 0*WORD, obj) # a1/1 -> size 5 - obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 3*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj) - obj = ac.malloc(2*WORD); ckob(ac, a1, 2, 0*WORD, obj) # a1/2 -> size 2 + ac = arena_collection_for_test(pagesize, "### ") + obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 2 + # + del ac.allocate_new_arena # restore the one from the class + arena_size = ac.arena_size + obj = ac.malloc(3*WORD) # need a new arena + assert ac.num_uninitialized_pages == (arena_size // ac.page_size + - 1 # for start_of_page() + - 1 # the just-allocated page + ) From arigo at codespeak.net Wed Sep 15 11:51:45 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 11:51:45 +0200 (CEST) Subject: [pypy-svn] r77081 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915095145.BF9F3282C16@codespeak.net> Author: arigo Date: Wed Sep 15 11:51:44 2010 New Revision: 77081 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Start to work on mass_free(). Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 11:51:44 2010 @@ -27,11 +27,11 @@ PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) PAGE_HEADER = lltype.Struct('PageHeader', - # -- The following two pointers make a chained list of pages with the same - # size class. Warning, 'prevpage' contains random garbage for the first - # entry in the list. + # -- The following pointer makes a chained list of pages. For non-full + # pages, it is a chained list of pages having the same size class, + # rooted in 'page_for_size[size_class]'. For full pages, it is a + # different chained list rooted in 'full_page_for_size[size_class]'. ('nextpage', PAGE_PTR), - ('prevpage', PAGE_PTR), # -- The number of free blocks, and the number of uninitialized blocks. # The number of allocated blocks is the rest. 
('nuninitialized', lltype.Signed), @@ -61,11 +61,13 @@ length = small_request_threshold / WORD + 1 self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, flavor='raw', zero=True) + self.full_page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length, + flavor='raw', zero=True) self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), length, flavor='raw') - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) for i in range(1, length): - self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i) + self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i) # self.uninitialized_pages = PAGE_NULL self.num_uninitialized_pages = 0 @@ -91,7 +93,7 @@ # # The 'result' was part of the chained list; read the next. page.nfree -= 1 - page.freeblock = result.address[0] + freeblock = result.address[0] llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), False) @@ -100,13 +102,19 @@ # The 'result' is part of the uninitialized blocks. ll_assert(page.nuninitialized > 0, "fully allocated page found in the page_for_size list") - page.freeblock = result + nsize page.nuninitialized -= 1 - if page.nuninitialized == 0: - # - # This was the last free block, so unlink the page from the - # chained list. - self.page_for_size[size_class] = page.nextpage + if page.nuninitialized > 0: + freeblock = result + nsize + else: + freeblock = NULL + # + page.freeblock = freeblock + if freeblock == NULL: + # This was the last free block, so unlink the page from the + # chained list and put it in the 'full_page_for_size' list. + self.page_for_size[size_class] = page.nextpage + page.nextpage = self.full_page_for_size[size_class] + self.full_page_for_size[size_class] = page # llarena.arena_reserve(result, _dummy_size(size), False) return result @@ -133,10 +141,9 @@ llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) # - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) result.nuninitialized = self.nblocks_for_size[size_class] result.nfree = 0 - result.freeblock = page + hdrsize + result.freeblock = page + self.hdrsize result.nextpage = PAGE_NULL ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") @@ -171,6 +178,72 @@ allocate_new_arena._dont_inline_ = True + def mass_free(self, ok_to_free_func): + """For each object, if ok_to_free_func(obj) returns True, then free + the object. + """ + # + # For each size class: + size_class = self.small_request_threshold / WORD + while size_class >= 1: + # + # Walk the pages in 'page_for_size[size_class]' and free objects. + # Pages completely freed are added to 'self.free_pages', and + # become available for reuse by any size class. Pages not + # completely freed are re-chained in 'newlist'. 
+ newlist = self.mass_free_in_list(self.page_for_size[size_class], + size_class, ok_to_free_func) + self.page_for_size[size_class] = newlist + # + size_class -= 1 + + + def mass_free_in_list(self, page, size_class, ok_to_free_func): + remaining_list = PAGE_NULL + nblocks = self.nblocks_for_size[size_class] + block_size = size_class * WORD + # + while page != PAGE_NULL: + self.walk_page(page, block_size, nblocks, ok_to_free_func) + page = page.nextpage + # + return remaining_list + + + def walk_page(self, page, block_size, nblocks, ok_to_free_func): + """Walk over all objects in a page, and ask ok_to_free_func().""" + # + freeblock = page.freeblock + obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) + obj += self.hdrsize + surviving_count = 0 + # + nblocks -= page.nuninitialized + while nblocks > 0: + # + if obj == freeblock: + # + # 'obj' points to a free block. + freeblock = obj.address[0] + # + else: + # 'obj' points to a valid object. + ll_assert(not freeblock or freeblock > obj, + "freeblocks are linked out of order") + # + if ok_to_free_func(obj): + xxx + else: + # The object should survive. + surviving_count += 1 + # + obj += block_size + nblocks -= 1 + # + # Return the number of objects left + return surviving_count + + def free(self, obj, size): """Free a previously malloc'ed block.""" ll_assert(size > 0, "free: size is null or negative") Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 11:51:44 2010 @@ -2,6 +2,7 @@ from pypy.rpython.memory.gc.minimarkpage import ArenaCollection from pypy.rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR from pypy.rpython.memory.gc.minimarkpage import PAGE_NULL, WORD +from pypy.rpython.memory.gc.minimarkpage import _dummy_size from pypy.rpython.lltypesystem import lltype, llmemory, llarena from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr @@ -57,7 +58,7 @@ assert ac.page_for_size[4] == page -def arena_collection_for_test(pagesize, pagelayout): +def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False): assert " " not in pagelayout.rstrip(" ") nb_pages = len(pagelayout) arenasize = pagesize * (nb_pages + 1) - 1 @@ -69,10 +70,16 @@ page.nfree = 0 page.nuninitialized = nblocks - nusedblocks page.freeblock = pageaddr + hdrsize + nusedblocks * size_block - page.nextpage = ac.page_for_size[size_class] - ac.page_for_size[size_class] = page - if page.nextpage: - page.nextpage.prevpage = page + if nusedblocks < nblocks: + chainedlists = ac.page_for_size + else: + chainedlists = ac.full_page_for_size + page.nextpage = chainedlists[size_class] + chainedlists[size_class] = page + if fill_with_objects: + for i in range(nusedblocks): + objaddr = pageaddr + hdrsize + i * size_block + llarena.arena_reserve(objaddr, _dummy_size(size_block)) # ac.allocate_new_arena() num_initialized_pages = len(pagelayout.rstrip(" ")) @@ -81,20 +88,22 @@ ac.num_uninitialized_pages -= num_initialized_pages # for i in reversed(range(num_initialized_pages)): + pageaddr = pagenum(ac, i) c = pagelayout[i] if '1' <= c <= '9': # a partially used page (1 block free) size_class = int(c) size_block = WORD * size_class nblocks = (pagesize - hdrsize) // size_block - link(pagenum(ac, i), size_class, size_block, - nblocks, nblocks-1) + link(pageaddr, size_class, 
size_block, nblocks, nblocks-1) elif c == '.': # a free, but initialized, page - pageaddr = pagenum(ac, i) llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) pageaddr.address[0] = ac.free_pages ac.free_pages = pageaddr - elif c == '#': # a random full page, not in any linked list - pass + elif c == '#': # a random full page, in the list 'full_pages' + size_class = fill_with_objects or 1 + size_block = WORD * size_class + nblocks = (pagesize - hdrsize) // size_block + link(pageaddr, size_class, size_block, nblocks, nblocks) # ac.allocate_new_arena = lambda: should_not_allocate_new_arenas return ac @@ -187,3 +196,24 @@ - 1 # for start_of_page() - 1 # the just-allocated page ) + +class OkToFree(object): + def __init__(self, ac, answer): + self.ac = ac + self.answer = answer + self.seen = [] + + def __call__(self, addr): + self.seen.append(addr - self.ac._startpageaddr) + if isinstance(self.answer, bool): + return self.answer + else: + return self.answer(addr) + +def test_mass_free_partial_remains(): + pagesize = hdrsize + 7*WORD + ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2) + ok_to_free = OkToFree(ac, False) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == [hdrsize + 0*WORD, + hdrsize + 2*WORD] From arigo at codespeak.net Wed Sep 15 11:55:49 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 11:55:49 +0200 (CEST) Subject: [pypy-svn] r77082 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915095549.166EF282C16@codespeak.net> Author: arigo Date: Wed Sep 15 11:55:48 2010 New Revision: 77082 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Enough to pass this test (not correct in general, of course). 
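As background for the tests above and the tweak below: the contract of mass_free() is that ok_to_free_func(obj) is called exactly once for every object still allocated in the walked pages, and exactly those objects for which it returns True are freed. A minimal pure-Python model of that contract (the Page class and mass_free_model are invented for illustration; they are not the llarena-based code):

    class Page(object):
        def __init__(self, objs):
            self.objs = list(objs)       # addresses of the allocated blocks

    def mass_free_model(pages, ok_to_free_func):
        # visit every allocated object exactly once, keep only the survivors
        for page in pages:
            page.objs = [o for o in page.objs if not ok_to_free_func(o)]

    seen = []
    keep_everything = lambda addr: seen.append(addr) or False
    mass_free_model([Page([0, 2])], keep_everything)
    assert seen == [0, 2]                # both objects were asked about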
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 11:55:48 2010 @@ -199,7 +199,7 @@ def mass_free_in_list(self, page, size_class, ok_to_free_func): - remaining_list = PAGE_NULL + remaining_list = page #PAGE_NULL nblocks = self.nblocks_for_size[size_class] block_size = size_class * WORD # Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 11:55:48 2010 @@ -112,18 +112,8 @@ def pagenum(ac, i): return ac._startpageaddr + ac.page_size * i -def getarena(ac, num, total=None): - if total is not None: - a = getarena(ac, total-1) - assert a == ac.arenas_end - assert a.nextarena == ARENA_NULL - prev = ARENA_NULL - a = ac.arenas_start - for i in range(num): - assert a.prevarena == prev - prev = a - a = a.nextarena - return a +def getpage(ac, i): + return llmemory.cast_adr_to_ptr(pagenum(ac, i), PAGE_PTR) def checkpage(ac, page, expected_position): assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position) @@ -217,3 +207,9 @@ ac.mass_free(ok_to_free) assert ok_to_free.seen == [hdrsize + 0*WORD, hdrsize + 2*WORD] + page = getpage(ac, 0) + assert page == ac.page_for_size[2] + assert page.nextpage == PAGE_NULL + assert page.nuninitialized == 1 + assert page.nfree == 0 + chkob(ac, 0, 4*WORD, page.freeblock) From antocuni at codespeak.net Wed Sep 15 13:07:21 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 15 Sep 2010 13:07:21 +0200 (CEST) Subject: [pypy-svn] r77083 - pypy/branch/jitffi Message-ID: <20100915110721.C94F0282C16@codespeak.net> Author: antocuni Date: Wed Sep 15 13:07:20 2010 New Revision: 77083 Added: pypy/branch/jitffi/ (props changed) - copied from r77082, pypy/trunk/ Log: a branch in which to try to jit rlib/libffi.py From arigo at codespeak.net Wed Sep 15 13:45:51 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 13:45:51 +0200 (CEST) Subject: [pypy-svn] r77084 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915114551.172A8282C16@codespeak.net> Author: arigo Date: Wed Sep 15 13:45:50 2010 New Revision: 77084 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Test and write what occurs when a page becomes completely free. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 13:45:50 2010 @@ -191,39 +191,76 @@ # Pages completely freed are added to 'self.free_pages', and # become available for reuse by any size class. Pages not # completely freed are re-chained in 'newlist'. 
- newlist = self.mass_free_in_list(self.page_for_size[size_class], - size_class, ok_to_free_func) - self.page_for_size[size_class] = newlist + self.mass_free_in_partial_list(size_class, ok_to_free_func) # size_class -= 1 - def mass_free_in_list(self, page, size_class, ok_to_free_func): - remaining_list = page #PAGE_NULL + def mass_free_in_partial_list(self, size_class, ok_to_free_func): + page = self.page_for_size[size_class] nblocks = self.nblocks_for_size[size_class] block_size = size_class * WORD + remaining_pages = PAGE_NULL # while page != PAGE_NULL: - self.walk_page(page, block_size, nblocks, ok_to_free_func) - page = page.nextpage + # + # Collect the page. + surviving = self.walk_page(page, block_size, + nblocks, ok_to_free_func) + nextpage = page.nextpage + # + if surviving > 0: + # + # Found at least 1 object surviving. Re-insert the page + # in the chained list. + page.nextpage = remaining_pages + remaining_pages = page + # + else: + # No object survives; free the page. + self.free_page(page) + + page = nextpage # - return remaining_list + self.page_for_size[size_class] = remaining_pages + + + def free_page(self, page): + """Free a whole page.""" + # + # Done by inserting it in the 'free_pages' list. + pageaddr = llmemory.cast_ptr_to_adr(page) + pageaddr = llarena.getfakearenaaddress(pageaddr) + llarena.arena_reset(pageaddr, llmemory.sizeof(PAGE_HEADER), 0) + llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) + pageaddr.address[0] = self.free_pages + self.free_pages = pageaddr def walk_page(self, page, block_size, nblocks, ok_to_free_func): """Walk over all objects in a page, and ask ok_to_free_func().""" # + # 'freeblock' is the next free block, or NULL if there isn't any more. freeblock = page.freeblock + # + # 'prevfreeblockat' is the address of where 'freeblock' was read from. + prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock') + prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat) + # obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) obj += self.hdrsize - surviving_count = 0 + surviving = 0 # initially # nblocks -= page.nuninitialized - while nblocks > 0: + index = nblocks + while index > 0: # if obj == freeblock: # - # 'obj' points to a free block. + # 'obj' points to a free block. It means that + # 'prevfreeblockat.address[0]' does not need to be updated. + # Just read the next free block from 'obj.address[0]'. + prevfreeblockat = obj freeblock = obj.address[0] # else: @@ -232,67 +269,29 @@ "freeblocks are linked out of order") # if ok_to_free_func(obj): - xxx + # + # The object should die. + llarena.arena_reset(obj, _dummy_size(block_size), 0) + llarena.arena_reserve(obj, + llmemory.sizeof(llmemory.Address), + False) + # Insert 'obj' in the linked list of free blocks. + prevfreeblockat.address[0] = obj + prevfreeblockat = obj + obj.address[0] = freeblock + # else: - # The object should survive. - surviving_count += 1 + # The object survives. 
+ surviving += 1 # obj += block_size - nblocks -= 1 + index -= 1 # - # Return the number of objects left - return surviving_count - - - def free(self, obj, size): - """Free a previously malloc'ed block.""" - ll_assert(size > 0, "free: size is null or negative") - ll_assert(size <= self.small_request_threshold, "free: size too big") - ll_assert((size & (WORD-1)) == 0, "free: size is not aligned") - # - llarena.arena_reset(obj, _dummy_size(size), False) - pageaddr = start_of_page(obj, self.page_size) - if not we_are_translated(): - hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) - assert obj - pageaddr >= hdrsize - assert (obj - pageaddr - hdrsize) % size == 0 - page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) - size_class = size / WORD - # - # Increment the number of known free objects - nfree = page.nfree + 1 - if nfree < self.nblocks_for_size[size_class]: - # - # Not all objects in this page are freed yet. - # Add the free block to the chained list. - page.nfree = nfree - llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address), - False) - obj.address[0] = page.freeblock - page.freeblock = obj - # - # If the page was full, then it now has space and should be - # linked back in the page_for_size[] linked list. - if nfree == 1: - page.nextpage = self.page_for_size[size_class] - if page.nextpage != PAGE_NULL: - page.nextpage.prevpage = page - self.page_for_size[size_class] = page - # - else: - # The page becomes completely free. Remove it from - # the page_for_size[] linked list. - if page == self.page_for_size[size_class]: - self.page_for_size[size_class] = page.nextpage - else: - prev = page.prevpage - next = page.nextpage - prev.nextpage = next - next.prevpage = prev - # - # Free the page, putting it back in the chained list of the arena - # where it belongs - xxx#... + # Update the number of free objects. + page.nfree = nblocks - surviving + # + # Return the number of surviving objects. + return surviving # ____________________________________________________________ Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 13:45:50 2010 @@ -213,3 +213,16 @@ assert page.nuninitialized == 1 assert page.nfree == 0 chkob(ac, 0, 4*WORD, page.freeblock) + assert ac.free_pages == NULL + +def test_mass_free_emptied_page(): + pagesize = hdrsize + 7*WORD + ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2) + ok_to_free = OkToFree(ac, True) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == [hdrsize + 0*WORD, + hdrsize + 2*WORD] + pageaddr = pagenum(ac, 0) + assert pageaddr == ac.free_pages + assert pageaddr.address[0] == NULL + assert ac.page_for_size[2] == PAGE_NULL From arigo at codespeak.net Wed Sep 15 13:56:02 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 13:56:02 +0200 (CEST) Subject: [pypy-svn] r77085 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915115602.9AD0B282C16@codespeak.net> Author: arigo Date: Wed Sep 15 13:56:01 2010 New Revision: 77085 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Another test, and implementation. 
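A note on the "page becomes completely free" case handled above: the emptied page is not returned to the operating system; it is pushed onto the collection-wide 'free_pages' chain, with the old head of the chain stored in the first word of the page, so that any size class can reuse it later. A plain-Python model of that pointer juggling (the dict stands in for writing the page's first word; all names are invented):

    free_pages = None                    # head of the chain (NULL initially)
    first_word = {}                      # page address -> value of first word

    def free_page_model(page_addr):
        global free_pages
        first_word[page_addr] = free_pages   # link to the previous head
        free_pages = page_addr               # the freed page becomes the head

    free_page_model(0x1000)
    free_page_model(0x2000)
    assert free_pages == 0x2000
    assert first_word[0x2000] == 0x1000 and first_word[0x1000] is None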
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 13:56:01 2010 @@ -187,42 +187,63 @@ size_class = self.small_request_threshold / WORD while size_class >= 1: # - # Walk the pages in 'page_for_size[size_class]' and free objects. + # Walk the pages in 'page_for_size[size_class]' and + # 'full_page_for_size[size_class]' and free some objects. # Pages completely freed are added to 'self.free_pages', and # become available for reuse by any size class. Pages not - # completely freed are re-chained in 'newlist'. - self.mass_free_in_partial_list(size_class, ok_to_free_func) + # completely freed are re-chained either in + # 'full_page_for_size[]' or 'page_for_size[]'. + self.mass_free_in_page(size_class, ok_to_free_func) # size_class -= 1 - def mass_free_in_partial_list(self, size_class, ok_to_free_func): - page = self.page_for_size[size_class] + def mass_free_in_page(self, size_class, ok_to_free_func): nblocks = self.nblocks_for_size[size_class] block_size = size_class * WORD - remaining_pages = PAGE_NULL + remaining_partial_pages = PAGE_NULL + remaining_full_pages = PAGE_NULL # - while page != PAGE_NULL: - # - # Collect the page. - surviving = self.walk_page(page, block_size, - nblocks, ok_to_free_func) - nextpage = page.nextpage + step = 0 + while step < 2: + if step == 0: + page = self.full_page_for_size[size_class] + else: + page = self.page_for_size[size_class] # - if surviving > 0: + while page != PAGE_NULL: # - # Found at least 1 object surviving. Re-insert the page - # in the chained list. - page.nextpage = remaining_pages - remaining_pages = page + # Collect the page. + surviving = self.walk_page(page, block_size, + nblocks, ok_to_free_func) + nextpage = page.nextpage # - else: - # No object survives; free the page. - self.free_page(page) + if surviving == nblocks: + # + # The page is still full. Re-insert it in the + # 'remaining_full_pages' chained list. + ll_assert(step == 0, + "A non-full page became full while freeing") + page.nextpage = remaining_full_pages + remaining_full_pages = page + # + elif surviving > 0: + # + # There is at least 1 object surviving. Re-insert + # the page in the 'remaining_partial_pages' chained list. + page.nextpage = remaining_partial_pages + remaining_partial_pages = page + # + else: + # No object survives; free the page. 
+ self.free_page(page) - page = nextpage + page = nextpage + # + step += 1 # - self.page_for_size[size_class] = remaining_pages + self.page_for_size[size_class] = remaining_partial_pages + self.full_page_for_size[size_class] = remaining_full_pages def free_page(self, page): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 13:56:01 2010 @@ -69,10 +69,11 @@ page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) page.nfree = 0 page.nuninitialized = nblocks - nusedblocks - page.freeblock = pageaddr + hdrsize + nusedblocks * size_block if nusedblocks < nblocks: + page.freeblock = pageaddr + hdrsize + nusedblocks * size_block chainedlists = ac.page_for_size else: + page.freeblock = NULL chainedlists = ac.full_page_for_size page.nextpage = chainedlists[size_class] chainedlists[size_class] = page @@ -226,3 +227,20 @@ assert pageaddr == ac.free_pages assert pageaddr.address[0] == NULL assert ac.page_for_size[2] == PAGE_NULL + +def test_mass_free_full_remains_full(): + pagesize = hdrsize + 7*WORD + ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2) + ok_to_free = OkToFree(ac, False) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == [hdrsize + 0*WORD, + hdrsize + 2*WORD, + hdrsize + 4*WORD] + page = getpage(ac, 0) + assert page == ac.full_page_for_size[2] + assert page.nextpage == PAGE_NULL + assert page.nuninitialized == 0 + assert page.nfree == 0 + assert page.freeblock == NULL + assert ac.free_pages == NULL + assert ac.page_for_size[2] == PAGE_NULL From arigo at codespeak.net Wed Sep 15 14:18:35 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 14:18:35 +0200 (CEST) Subject: [pypy-svn] r77086 - pypy/branch/gen2-gc/pypy/rpython/memory/gc/test Message-ID: <20100915121835.986DF282C16@codespeak.net> Author: arigo Date: Wed Sep 15 14:18:34 2010 New Revision: 77086 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: More test. 
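After the change above, a swept page can end up in one of three places, depending on how many of its objects survived: back on the full list, back on the partial list, or on the shared free-page chain. Schematically (plain Python; the lists stand in for the chained page lists, and the names are made up for the example):

    def rechain_model(page, surviving, nblocks,
                      full_pages, partial_pages, free_pages):
        if surviving == nblocks:
            full_pages.append(page)        # still completely full
        elif surviving > 0:
            partial_pages.append(page)     # has free blocks again
        else:
            free_pages.append(page)        # empty: reusable by any size class

    full, partial, free = [], [], []
    rechain_model('p1', 3, 3, full, partial, free)
    rechain_model('p2', 1, 3, full, partial, free)
    rechain_model('p3', 0, 3, full, partial, free)
    assert (full, partial, free) == (['p1'], ['p2'], ['p3'])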
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 14:18:34 2010 @@ -64,11 +64,16 @@ arenasize = pagesize * (nb_pages + 1) - 1 ac = ArenaCollection(arenasize, pagesize, 9*WORD) # - def link(pageaddr, size_class, size_block, nblocks, nusedblocks): + def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1): + assert step in (1, 2) llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER)) page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) - page.nfree = 0 - page.nuninitialized = nblocks - nusedblocks + if step == 1: + page.nfree = 0 + page.nuninitialized = nblocks - nusedblocks + else: + page.nfree = nusedblocks + page.nuninitialized = nblocks - 2*nusedblocks if nusedblocks < nblocks: page.freeblock = pageaddr + hdrsize + nusedblocks * size_block chainedlists = ac.page_for_size @@ -78,9 +83,19 @@ page.nextpage = chainedlists[size_class] chainedlists[size_class] = page if fill_with_objects: - for i in range(nusedblocks): + for i in range(0, nusedblocks*step, step): objaddr = pageaddr + hdrsize + i * size_block llarena.arena_reserve(objaddr, _dummy_size(size_block)) + if step == 2: + prev = 'page.freeblock' + for i in range(1, nusedblocks*step, step): + holeaddr = pageaddr + hdrsize + i * size_block + llarena.arena_reserve(holeaddr, + llmemory.sizeof(llmemory.Address)) + exec '%s = holeaddr' % prev in globals(), locals() + prevhole = holeaddr + prev = 'prevhole.address[0]' + exec '%s = NULL' % prev in globals(), locals() # ac.allocate_new_arena() num_initialized_pages = len(pagelayout.rstrip(" ")) @@ -105,6 +120,12 @@ size_block = WORD * size_class nblocks = (pagesize - hdrsize) // size_block link(pageaddr, size_class, size_block, nblocks, nblocks) + elif c == '/': # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs + size_class = fill_with_objects or 1 + size_block = WORD * size_class + nblocks = (pagesize - hdrsize) // size_block + link(pageaddr, size_class, size_block, nblocks, nblocks // 3, + step=2) # ac.allocate_new_arena = lambda: should_not_allocate_new_arenas return ac @@ -190,24 +211,30 @@ class OkToFree(object): def __init__(self, ac, answer): + assert callable(answer) or 0.0 <= answer <= 1.0 self.ac = ac self.answer = answer - self.seen = [] + self.lastnum = 0.0 + self.seen = {} def __call__(self, addr): - self.seen.append(addr - self.ac._startpageaddr) - if isinstance(self.answer, bool): - return self.answer + if callable(self.answer): + ok_to_free = self.answer(addr) else: - return self.answer(addr) + self.lastnum += self.answer + ok_to_free = self.lastnum >= 1.0 + if ok_to_free: + self.lastnum -= 1.0 + self.seen[addr - self.ac._startpageaddr] = ok_to_free + return ok_to_free def test_mass_free_partial_remains(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2) ok_to_free = OkToFree(ac, False) ac.mass_free(ok_to_free) - assert ok_to_free.seen == [hdrsize + 0*WORD, - hdrsize + 2*WORD] + assert ok_to_free.seen == {hdrsize + 0*WORD: False, + hdrsize + 2*WORD: False} page = getpage(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL @@ -221,8 +248,8 @@ ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2) ok_to_free = OkToFree(ac, True) ac.mass_free(ok_to_free) - assert ok_to_free.seen == [hdrsize + 0*WORD, - 
hdrsize + 2*WORD] + assert ok_to_free.seen == {hdrsize + 0*WORD: True, + hdrsize + 2*WORD: True} pageaddr = pagenum(ac, 0) assert pageaddr == ac.free_pages assert pageaddr.address[0] == NULL @@ -233,9 +260,9 @@ ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2) ok_to_free = OkToFree(ac, False) ac.mass_free(ok_to_free) - assert ok_to_free.seen == [hdrsize + 0*WORD, - hdrsize + 2*WORD, - hdrsize + 4*WORD] + assert ok_to_free.seen == {hdrsize + 0*WORD: False, + hdrsize + 2*WORD: False, + hdrsize + 4*WORD: False} page = getpage(ac, 0) assert page == ac.full_page_for_size[2] assert page.nextpage == PAGE_NULL @@ -244,3 +271,52 @@ assert page.freeblock == NULL assert ac.free_pages == NULL assert ac.page_for_size[2] == PAGE_NULL + +def test_mass_free_full_is_partially_emptied(): + pagesize = hdrsize + 9*WORD + ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2) + ok_to_free = OkToFree(ac, 0.5) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == {hdrsize + 0*WORD: False, + hdrsize + 2*WORD: True, + hdrsize + 4*WORD: False, + hdrsize + 6*WORD: True} + page = getpage(ac, 0) + pageaddr = pagenum(ac, 0) + assert page == ac.page_for_size[2] + assert page.nextpage == PAGE_NULL + assert page.nuninitialized == 0 + assert page.nfree == 2 + assert page.freeblock == pageaddr + hdrsize + 2*WORD + assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD + assert page.freeblock.address[0].address[0] == NULL + assert ac.free_pages == NULL + assert ac.full_page_for_size[2] == PAGE_NULL + +def test_mass_free_half_page_remains(): + pagesize = hdrsize + 24*WORD + ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) + page = getpage(ac, 0) + assert page.nuninitialized == 4 + assert page.nfree == 4 + # + ok_to_free = OkToFree(ac, False) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == {hdrsize + 0*WORD: False, + hdrsize + 4*WORD: False, + hdrsize + 8*WORD: False, + hdrsize + 12*WORD: False} + page = getpage(ac, 0) + pageaddr = pagenum(ac, 0) + assert page == ac.page_for_size[2] + assert page.nextpage == PAGE_NULL + assert page.nuninitialized == 4 + assert page.nfree == 4 + assert page.freeblock == pageaddr + hdrsize + 2*WORD + assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD + assert page.freeblock.address[0].address[0] == \ + pageaddr + hdrsize + 10*WORD + assert page.freeblock.address[0].address[0].address[0] == \ + pageaddr + hdrsize + 14*WORD + assert ac.free_pages == NULL + assert ac.full_page_for_size[2] == PAGE_NULL From arigo at codespeak.net Wed Sep 15 14:21:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 14:21:00 +0200 (CEST) Subject: [pypy-svn] r77087 - pypy/branch/gen2-gc/pypy/rpython/memory/gc/test Message-ID: <20100915122100.13FEA282C16@codespeak.net> Author: arigo Date: Wed Sep 15 14:20:58 2010 New Revision: 77087 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: More test. 
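The updated OkToFree helper above accepts a fraction instead of a callable: a running accumulator then frees that fraction of the objects deterministically, so answer=0.5 frees every second object it is asked about. The accumulator trick in isolation (standalone illustration, not the test code itself):

    def fraction_filter(addresses, fraction):
        # free roughly 'fraction' of the objects, deterministically
        acc, freed = 0.0, []
        for addr in addresses:
            acc += fraction
            if acc >= 1.0:
                acc -= 1.0
                freed.append(addr)
        return freed

    assert fraction_filter([0, 2, 4, 6], 0.5) == [2, 6]   # every second one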
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 14:20:58 2010 @@ -320,3 +320,35 @@ pageaddr + hdrsize + 14*WORD assert ac.free_pages == NULL assert ac.full_page_for_size[2] == PAGE_NULL + +def test_mass_free_half_page_becomes_more_free(): + pagesize = hdrsize + 24*WORD + ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) + page = getpage(ac, 0) + assert page.nuninitialized == 4 + assert page.nfree == 4 + # + ok_to_free = OkToFree(ac, 0.5) + ac.mass_free(ok_to_free) + assert ok_to_free.seen == {hdrsize + 0*WORD: False, + hdrsize + 4*WORD: True, + hdrsize + 8*WORD: False, + hdrsize + 12*WORD: True} + page = getpage(ac, 0) + pageaddr = pagenum(ac, 0) + assert page == ac.page_for_size[2] + assert page.nextpage == PAGE_NULL + assert page.nuninitialized == 4 + assert page.nfree == 6 + fb = page.freeblock + assert fb == pageaddr + hdrsize + 2*WORD + assert fb.address[0] == pageaddr + hdrsize + 4*WORD + assert fb.address[0].address[0] == pageaddr + hdrsize + 6*WORD + assert fb.address[0].address[0].address[0] == \ + pageaddr + hdrsize + 10*WORD + assert fb.address[0].address[0].address[0].address[0] == \ + pageaddr + hdrsize + 12*WORD + assert fb.address[0].address[0].address[0].address[0].address[0] == \ + pageaddr + hdrsize + 14*WORD + assert ac.free_pages == NULL + assert ac.full_page_for_size[2] == PAGE_NULL From arigo at codespeak.net Wed Sep 15 14:29:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 14:29:18 +0200 (CEST) Subject: [pypy-svn] r77088 - pypy/branch/gen2-gc/pypy/rpython/memory/gc/test Message-ID: <20100915122918.7E3D6282C16@codespeak.net> Author: arigo Date: Wed Sep 15 14:29:17 2010 New Revision: 77088 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: More test. 
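The chain checked in test_mass_free_half_page_becomes_more_free above illustrates the key invariant of the sweep: the per-page free list stays sorted by address, so newly freed blocks end up interleaved with the holes that already existed. A simplified model of one sweep maintaining that invariant (invented helper, list-based instead of linked addresses):

    def sweep_page_model(blocks, already_free, ok_to_free):
        # 'blocks' lists every block address in the page, in address order
        new_free, surviving = [], 0
        for addr in blocks:
            if addr in already_free:
                new_free.append(addr)          # existing hole, kept in place
            elif ok_to_free(addr):
                new_free.append(addr)          # freed now, spliced in order
            else:
                surviving += 1
        return new_free, surviving

    # holes at 2 and 6; the object at 4 dies; the result stays address-ordered
    assert sweep_page_model([0, 2, 4, 6], [2, 6], lambda a: a == 4) == ([2, 4, 6], 1)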
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 14:29:17 2010 @@ -95,7 +95,8 @@ exec '%s = holeaddr' % prev in globals(), locals() prevhole = holeaddr prev = 'prevhole.address[0]' - exec '%s = NULL' % prev in globals(), locals() + endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block + exec '%s = endaddr' % prev in globals(), locals() # ac.allocate_new_arena() num_initialized_pages = len(pagelayout.rstrip(" ")) @@ -196,6 +197,34 @@ obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj) obj = ac.malloc(3*WORD); chkob(ac, 6, 3*WORD, obj) +def test_malloc_from_partial_page(): + pagesize = hdrsize + 18*WORD + ac = arena_collection_for_test(pagesize, "/.", fill_with_objects=2) + page = getpage(ac, 0) + assert page.nfree == 3 + assert page.nuninitialized == 3 + chkob(ac, 0, 2*WORD, page.freeblock) + # + obj = ac.malloc(2*WORD); chkob(ac, 0, 2*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 0, 6*WORD, obj) + assert page.nfree == 1 + assert page.nuninitialized == 3 + chkob(ac, 0, 10*WORD, page.freeblock) + # + obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj) + assert page.nfree == 0 + assert page.nuninitialized == 3 + chkob(ac, 0, 12*WORD, page.freeblock) + # + obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj) + assert page.nuninitialized == 2 + obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj) + obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj) + assert page.nfree == 0 + assert page.nuninitialized == 0 + obj = ac.malloc(2*WORD); chkob(ac, 1, 0*WORD, obj) + + def test_malloc_new_arena(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "### ") From arigo at codespeak.net Wed Sep 15 14:58:26 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 14:58:26 +0200 (CEST) Subject: [pypy-svn] r77089 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915125826.A2CFB282C19@codespeak.net> Author: arigo Date: Wed Sep 15 14:58:24 2010 New Revision: 77089 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Write a random test, and a fix found by it. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 14:58:24 2010 @@ -96,7 +96,7 @@ freeblock = result.address[0] llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), - False) + 0) # else: # The 'result' is part of the uninitialized blocks. @@ -116,7 +116,7 @@ page.nextpage = self.full_page_for_size[size_class] self.full_page_for_size[size_class] = page # - llarena.arena_reserve(result, _dummy_size(size), False) + llarena.arena_reserve(result, _dummy_size(size)) return result @@ -252,7 +252,7 @@ # Done by inserting it in the 'free_pages' list. 
pageaddr = llmemory.cast_ptr_to_adr(page) pageaddr = llarena.getfakearenaaddress(pageaddr) - llarena.arena_reset(pageaddr, llmemory.sizeof(PAGE_HEADER), 0) + llarena.arena_reset(pageaddr, self.page_size, 0) llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) pageaddr.address[0] = self.free_pages self.free_pages = pageaddr @@ -294,8 +294,7 @@ # The object should die. llarena.arena_reset(obj, _dummy_size(block_size), 0) llarena.arena_reserve(obj, - llmemory.sizeof(llmemory.Address), - False) + llmemory.sizeof(llmemory.Address)) # Insert 'obj' in the linked list of free blocks. prevfreeblockat.address[0] = obj prevfreeblockat = obj Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 14:58:24 2010 @@ -254,7 +254,9 @@ ok_to_free = self.lastnum >= 1.0 if ok_to_free: self.lastnum -= 1.0 - self.seen[addr - self.ac._startpageaddr] = ok_to_free + key = addr - self.ac._startpageaddr + assert key not in self.seen + self.seen[key] = ok_to_free return ok_to_free def test_mass_free_partial_remains(): @@ -381,3 +383,42 @@ pageaddr + hdrsize + 14*WORD assert ac.free_pages == NULL assert ac.full_page_for_size[2] == PAGE_NULL + +# ____________________________________________________________ + +def test_random(): + import random + pagesize = hdrsize + 24*WORD + ac = arena_collection_for_test(pagesize, " " * 28) + live_objects = {} + # + # Run the test until ac.allocate_new_arena() is called. + class DoneTesting(Exception): + pass + def done_testing(): + raise DoneTesting + ac.allocate_new_arena = done_testing + # + try: + while True: + # + # Allocate some more objects + for i in range(random.randrange(50, 100)): + size_class = random.randrange(1, 7) + obj = ac.malloc(size_class * WORD) + at = obj - ac._startpageaddr + assert at not in live_objects + live_objects[at] = None + # + # Free half the objects, randomly + ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5) + ac.mass_free(ok_to_free) + # + # Check that we have seen all objects + assert dict.fromkeys(ok_to_free.seen) == live_objects + for at, freed in ok_to_free.seen.items(): + if freed: + del live_objects[at] + except DoneTesting: + # the following output looks cool on a 112-character-wide terminal. + print ac._startpageaddr.arena.usagemap From arigo at codespeak.net Wed Sep 15 15:34:34 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 15:34:34 +0200 (CEST) Subject: [pypy-svn] r77090 - pypy/branch/gen2-gc/pypy/rpython/lltypesystem Message-ID: <20100915133434.5AE49282C16@codespeak.net> Author: arigo Date: Wed Sep 15 15:34:32 2010 New Revision: 77090 Modified: pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py Log: Add some comments and __repr__s. 
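The random test introduced in r77089 above follows a pattern worth naming: keep a shadow set of live objects, allocate and free in random bursts, and after every sweep check that the callback saw exactly the live set. A self-contained toy version of that pattern (ToyAllocator and its integer "addresses" are invented; only the structure mirrors test_random):

    import random

    class ToyAllocator(object):
        """Stand-in allocator handing out unique integer 'addresses'."""
        def __init__(self):
            self._next, self._live = 0, set()
        def malloc(self, size):
            self._next += size
            self._live.add(self._next)
            return self._next
        def mass_free(self, ok_to_free):
            self._live = set(a for a in self._live if not ok_to_free(a))

    ac, live = ToyAllocator(), set()
    for _round in range(10):
        for _ in range(random.randrange(50, 100)):
            addr = ac.malloc(random.randrange(1, 7))
            assert addr not in live
            live.add(addr)
        seen = {}
        def ok_to_free(addr):
            seen[addr] = random.random() < 0.5
            return seen[addr]
        ac.mass_free(ok_to_free)
        assert set(seen) == live          # the sweep visited every live object
        live = set(a for a, freed in seen.items() if not freed)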
Modified: pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py Wed Sep 15 15:34:32 2010 @@ -16,8 +16,11 @@ class Arena(object): object_arena_location = {} # {container: (arena, offset)} old_object_arena_location = weakref.WeakKeyDictionary() + _count_arenas = 0 def __init__(self, nbytes, zero): + Arena._count_arenas += 1 + self._arena_index = Arena._count_arenas self.nbytes = nbytes self.usagemap = array.array('c') self.objectptrs = {} # {offset: ptr-to-container} @@ -25,6 +28,9 @@ self.freed = False self.reset(zero) + def __repr__(self): + return '' % (self._arena_index, self.nbytes) + def reset(self, zero, start=0, size=None): self.check() if size is None: @@ -357,6 +363,11 @@ # This only works with linux's madvise(), which is really not a memory # usage hint but a real command. It guarantees that after MADV_DONTNEED # the pages are cleared again. + + # Note that the trick of the general 'posix' section below, i.e. + # reading /dev/zero, does not seem to have the correct effect of + # lazily-allocating pages on all Linux systems. + from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo _eci = ExternalCompilationInfo(includes=['sys/mman.h']) From arigo at codespeak.net Wed Sep 15 15:45:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 15:45:58 +0200 (CEST) Subject: [pypy-svn] r77091 - pypy/branch/gen2-gc/pypy/rpython/memory/gc Message-ID: <20100915134558.93C54282C16@codespeak.net> Author: arigo Date: Wed Sep 15 15:45:57 2010 New Revision: 77091 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: * malloc_varsize_clear(). * external_malloc(). Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 15:45:57 2010 @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import MovingGCBase from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE -from pypy.rlib.rarithmetic import LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT from pypy.rlib.debug import ll_assert, debug_print WORD = LONG_BIT // 8 @@ -53,8 +53,8 @@ # where the object was moved. This means that all objects in the # nursery need to be at least 2 words long, but objects outside the # nursery don't need to. - minimal_size_in_nursery = (llmemory.sizeof(HDR) + - llmemory.sizeof(llmemory.Address)) + minimal_size_in_nursery = llmemory.raw_malloc_usage( + llmemory.sizeof(HDR) + llmemory.sizeof(llmemory.Address)) TRANSLATION_PARAMS = { @@ -93,9 +93,6 @@ self.ac = ArenaCollectionClass(arena_size, page_size, small_request_threshold) # - # A list of all raw_malloced objects (the objects too large) - self.rawmalloced_objects = self.AddressStack() - # # Used by minor collection: a list of non-young objects that # (may) contain a pointer to a young object. Populated by # the write barrier. 
@@ -112,6 +109,9 @@ # assert self.nursery_size > 0, "XXX" # + # A list of all raw_malloced objects (the objects too large) + self.rawmalloced_objects = self.AddressStack() + # # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). @@ -136,24 +136,66 @@ # If totalsize is greater than small_request_threshold, ask for # a rawmalloc. The following check should be constant-folded. if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - return self.external_malloc(typeid, size) + result = self.external_malloc(typeid, totalsize) + # + else: + # If totalsize is smaller than minimal_size_in_nursery, round it + # up. The following check should also be constant-folded. + if (llmemory.raw_malloc_usage(totalsize) < + llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): + totalsize = self.minimal_size_in_nursery + # + # Get the memory from the nursery. If there is not enough space + # there, do a collect first. + result = self.nursery_next + self.nursery_next = result + totalsize + if self.nursery_next > self.nursery_top: + result = self.collect_and_reserve(totalsize) + # + # Build the object. + llarena.arena_reserve(result, totalsize) + self.init_gc_object(result, typeid, flags=0) + # + return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + + + def malloc_varsize_clear(self, typeid, length, size, itemsize, + offset_to_length, can_collect): + ll_assert(can_collect, "!can_collect") + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + size + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError + # + # If totalsize is greater than small_request_threshold, ask for + # a rawmalloc. + if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + result = self.external_malloc(typeid, totalsize) + # + else: + # 'totalsize' should contain at least the GC header and + # the length word, so it should never be smaller than + # 'minimal_size_in_nursery' so far + ll_assert(llmemory.raw_malloc_usage(totalsize) >= + self.minimal_size_in_nursery, + "malloc_varsize_clear(): totalsize < minimalsize") + # + # Get the memory from the nursery. If there is not enough space + # there, do a collect first. + result = self.nursery_next + self.nursery_next = result + totalsize + if self.nursery_next > self.nursery_top: + result = self.collect_and_reserve(totalsize) + # + # Build the object. + llarena.arena_reserve(result, totalsize) + self.init_gc_object(result, typeid, flags=0) # - # If totalsize is smaller than minimal_size_in_nursery, round it up. - # The following check should also be constant-folded. - if (llmemory.raw_malloc_usage(totalsize) < - llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): - totalsize = self.minimal_size_in_nursery - # - # Get the memory from the nursery. If there is not enough space - # there, do a collect first. - result = self.nursery_next - self.nursery_next = result + totalsize - if self.nursery_next > self.nursery_top: - result = self.collect_and_reserve(totalsize) - # - # Build the object. - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid, flags=0) + # Set the length and return the object. 
+ (result + size_gc_header + offset_to_length).signed[0] = length return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) @@ -175,22 +217,19 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, size): + def external_malloc(self, typeid, totalsize): """Allocate a large object using raw_malloc().""" # - # First check if we are called because we wanted to allocate - # an object that is larger than self.small_request_threshold. - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + size - xxxxxxx - # - # Yes: just use a raw_malloc() to get the object. result = llmemory.raw_malloc(totalsize) if not result: - raise MemoryError() - raw_memclear(result, totalsize) + raise MemoryError("cannot allocate large object") + llmemory.raw_memclear(result, totalsize) + self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) + # + size_gc_header = self.gcheaderbuilder.size_gc_header self.rawmalloced_objects.append(result + size_gc_header) return result + external_malloc._dont_inline_ = True # ---------- @@ -301,8 +340,6 @@ """Perform a minor collection: find the objects from the nursery that remain alive and move them out.""" # - #print "nursery_collect()" - # # First, find the roots that point to nursery objects. These # nursery objects are copied out of the nursery. Note that # references to further nursery objects are not modified by @@ -428,6 +465,10 @@ self.collect_roots() self.visit_all_objects() # + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. + self.free_unvisited_rawmalloc_objects() + # # Ask the ArenaCollection to visit all objects. Free the ones # that have not been visited above, and reset GCFLAG_VISITED on # the others. @@ -451,6 +492,21 @@ def _reset_gcflag_visited(self, obj, ignored=None): self.header(obj).tid &= ~GCFLAG_VISITED + def free_unvisited_rawmalloc_objects(self): + size_gc_header = self.gcheaderbuilder.size_gc_header + list = self.rawmalloced_objects + self.rawmalloced_objects = self.AddressStack() + # + while list.non_empty(): + obj = list.pop() + if self.header(obj).tid & GCFLAG_VISITED: + self.header(obj).tid &= ~GCFLAG_VISITED # survives + self.rawmalloced_objects.append(obj) + else: + llmemory.raw_free(obj - size_gc_header) + # + list.delete() + def collect_roots(self): # Collect all roots. Starts from all the objects From cfbolz at codespeak.net Wed Sep 15 16:55:28 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 15 Sep 2010 16:55:28 +0200 (CEST) Subject: [pypy-svn] r77092 - in pypy/branch/better-map-instances/pypy: interpreter objspace/std objspace/std/test Message-ID: <20100915145528.7FE8E282C16@codespeak.net> Author: cfbolz Date: Wed Sep 15 16:55:26 2010 New Revision: 77092 Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/objspace.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: make instances even smaller by manually inlining the array into the object for instance sizes up to 10. 
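The idea behind this mapdict change, in isolation: instead of giving every instance a separate storage list, generate subclasses with a fixed number of inlined fields (_value0 ... _value{n-1}); only when an instance needs more attributes than fit does the last field start holding a reference to an overflow list. A stripped-down pure-Python sketch of that storage scheme for n=2, without the rerased erasing and without maps (the field names mirror the patch, everything else is invented):

    class Size2Storage(object):
        """Two inlined slots; slot 1 degrades into an overflow list."""
        N = 2
        def __init__(self):
            self._value0 = None
            self._value1 = None
            self._nattrs = 0              # plays the role of map.length()

        def _has_storage_list(self):
            return self._nattrs > self.N

        def read(self, index):
            if index == 0:
                return self._value0
            if self._has_storage_list():
                return self._value1[index - (self.N - 1)]
            return self._value1

        def write(self, index, value):
            if index + 1 > self._nattrs:
                self._nattrs = index + 1
            if index == 0:
                self._value0 = value
                return
            if not self._has_storage_list():
                self._value1 = value                 # fits in the inlined field
                return
            if not isinstance(self._value1, list):
                self._value1 = [self._value1]        # grow into an overflow list
            lst = self._value1
            while len(lst) <= index - (self.N - 1):
                lst.append(None)
            lst[index - (self.N - 1)] = value

    s = Size2Storage()
    s.write(0, 'a'); s.write(1, 'b'); s.write(2, 'c')
    assert (s.read(0), s.read(1), s.read(2)) == ('a', 'b', 'c')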
Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Wed Sep 15 16:55:26 2010 @@ -199,8 +199,8 @@ body[key] = value if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import Object - add(Object) + from pypy.objspace.std.mapdict import BaseMapdictObject + add(BaseMapdictObject) features = () if "user" in features: # generic feature needed by all subcls Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Wed Sep 15 16:55:26 2010 @@ -76,8 +76,10 @@ new_storage[i] = obj._mapdict_read_storage(i) obj._set_mapdict_storage(new_storage) - obj._mapdict_write_storage(attr.position, w_value) + # the order is important here: first change the map, then the storage, + # for the benefit of the special subclasses obj._set_mapdict_map(attr) + obj._mapdict_write_storage(attr.position, w_value) def materialize_r_dict(self, space, obj, w_d): raise NotImplementedError("abstract base class") @@ -259,6 +261,7 @@ # RPython reasons w_obj._set_mapdict_map(new_obj.map) w_obj._set_mapdict_storage(new_obj.storage) + # ____________________________________________________________ # object implementation @@ -268,28 +271,18 @@ SLOTS_STARTING_FROM = 3 -class Object(W_Root): # slightly evil to make it inherit from W_Root +class BaseMapdictObject(W_Root): # slightly evil to make it inherit from W_Root def _init_empty(self, map): - from pypy.rlib.debug import make_sure_not_resized - self.map = map - self.storage = make_sure_not_resized([None] * map.size_estimate()) + raise NotImplementedError("abstract base class") + def _become(self, new_obj): - self.map = new_obj.map - self.storage = new_obj.storage + self._set_mapdict_map(new_obj.map) + self._set_mapdict_storage(new_obj.storage) def _get_mapdict_map(self): return jit.hint(self.map, promote=True) def _set_mapdict_map(self, map): self.map = map - def _mapdict_read_storage(self, index): - return self.storage[index] - def _mapdict_write_storage(self, index, value): - self.storage[index] = value - def _mapdict_storage_length(self): - return len(self.storage) - def _set_mapdict_storage(self, storage): - self.storage = storage - # _____________________________________________ # objspace interface @@ -363,6 +356,115 @@ assert isinstance(weakreflifeline, WeakrefLifeline) self._get_mapdict_map().write(self, ("weakref", SPECIAL), weakreflifeline) +class ObjectMixin(object): + _mixin_ = True + def _init_empty(self, map): + from pypy.rlib.debug import make_sure_not_resized + self.map = map + self.storage = make_sure_not_resized([None] * map.size_estimate()) + + def _mapdict_read_storage(self, index): + return self.storage[index] + def _mapdict_write_storage(self, index, value): + self.storage[index] = value + def _mapdict_storage_length(self): + return len(self.storage) + def _set_mapdict_storage(self, storage): + self.storage = storage + +class Object(ObjectMixin, BaseMapdictObject): + pass # mainly for tests + +def get_subclass_of_correct_size(space, supercls, w_type): + assert space.config.objspace.std.withmapdict + map = w_type.terminator + classes = 
memo_get_subclass_of_correct_size(space, supercls) + size = map.size_estimate() + if not size: + size = 1 + try: + return classes[size - 1] + except IndexError: + return classes[-1] + + +def memo_get_subclass_of_correct_size(space, supercls): + key = space, supercls + try: + return _subclass_cache[key] + except KeyError: + result = [] + for i in range(1, 11): # XXX tweak this number + result.append(_make_subclass_size_n(supercls, i)) + _subclass_cache[key] = result + return result +memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" +_subclass_cache = {} + +def _make_subclass_size_n(supercls, n): + from pypy.rlib import unroll, rerased + rangen = unroll.unrolling_iterable(range(n)) + nmin1 = n - 1 + rangenmin1 = unroll.unrolling_iterable(range(nmin1)) + class subcls(supercls): + def _init_empty(self, map): + from pypy.rlib.debug import make_sure_not_resized + for i in rangen: + setattr(self, "_value%s" % i, rerased.erase(None)) + self.map = map + + def _has_storage_list(self): + return self.map.length() > n + + def _mapdict_get_storage_list(self): + erased = getattr(self, "_value%s" % nmin1) + return rerased.unerase_fixedsizelist(erased, W_Root) + + def _mapdict_read_storage(self, index): + for i in rangenmin1: + if index == i: + erased = getattr(self, "_value%s" % i) + return rerased.unerase(erased, W_Root) + if self._has_storage_list(): + return self._mapdict_get_storage_list()[index - nmin1] + erased = getattr(self, "_value%s" % nmin1) + return rerased.unerase(erased, W_Root) + + def _mapdict_write_storage(self, index, value): + erased = rerased.erase(value) + for i in rangenmin1: + if index == i: + setattr(self, "_value%s" % i, erased) + return + if self._has_storage_list(): + self._mapdict_get_storage_list()[index - nmin1] = value + return + setattr(self, "_value%s" % nmin1, erased) + + def _mapdict_storage_length(self): + if self._has_storage_list(): + return len(self._mapdict_get_storage_list()) + n - 1 + return n + + def _set_mapdict_storage(self, storage): + len_storage = len(storage) + for i in rangenmin1: + if i < len_storage: + erased = rerased.erase(storage[i]) + else: + erased = rerased.erased(None) + setattr(self, "_value%s" % i, erased) + if len_storage < n: + erased = rerased.erase(None) + elif len_storage == n: + erased = rerased.erase(storage[nmin1]) + else: + storage_list = storage[nmin1:] + erased = rerased.erase_fixedsizelist(storage_list, W_Root) + setattr(self, "_value%s" % nmin1, erased) + + subcls.__name__ = supercls.__name__ + "Size%s" % n + return subcls # ____________________________________________________________ # dict implementation Modified: pypy/branch/better-map-instances/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/objspace.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/objspace.py Wed Sep 15 16:55:26 2010 @@ -321,6 +321,9 @@ subcls = get_unique_interplevel_subclass( self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, w_subtype.needsdel, w_subtype.weakrefable) + if self.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import get_subclass_of_correct_size + subcls = get_subclass_of_correct_size(self, subcls, w_subtype) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py 
============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Wed Sep 15 16:55:26 2010 @@ -354,6 +354,56 @@ assert w_d.getitem_str("c") == 7 # ___________________________________________________________ +# check specialized classes + + +def test_specialized_class(): + from pypy.rlib import rerased + class Object(BaseMapdictObject): + class typedef: + hasdict = False + classes = memo_get_subclass_of_correct_size(space, Object) + w1 = W_Root() + w2 = W_Root() + w3 = W_Root() + w4 = W_Root() + w5 = W_Root() + w6 = W_Root() + for objectcls in classes: + cls = Class() + obj = objectcls() + obj.user_setup(space, cls) + obj.setdictvalue(space, "a", w1) + assert rerased.unerase(obj._value0, W_Root) is w1 + assert obj.getdictvalue(space, "a") is w1 + assert obj.getdictvalue(space, "b") is None + assert obj.getdictvalue(space, "c") is None + obj.setdictvalue(space, "a", w2) + assert rerased.unerase(obj._value0, W_Root) is w2 + assert obj.getdictvalue(space, "a") == w2 + assert obj.getdictvalue(space, "b") is None + assert obj.getdictvalue(space, "c") is None + + obj.setdictvalue(space, "b", w3) + #== [20, 30] + assert obj.getdictvalue(space, "a") is w2 + assert obj.getdictvalue(space, "b") is w3 + assert obj.getdictvalue(space, "c") is None + obj.setdictvalue(space, "b", w4) + assert obj.getdictvalue(space, "a") is w2 + assert obj.getdictvalue(space, "b") is w4 + assert obj.getdictvalue(space, "c") is None + + obj2 = objectcls() + obj2.user_setup(space, cls) + obj2.setdictvalue(space, "a", w5) + obj2.setdictvalue(space, "b", w6) + assert obj2.getdictvalue(space, "a") is w5 + assert obj2.getdictvalue(space, "b") is w6 + assert obj2.map is obj.map + + +# ___________________________________________________________ # integration tests # XXX write more From arigo at codespeak.net Wed Sep 15 17:07:43 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 17:07:43 +0200 (CEST) Subject: [pypy-svn] r77093 - pypy/branch/gen2-gc/pypy/rpython/memory/gc Message-ID: <20100915150743.B3F59282C16@codespeak.net> Author: arigo Date: Wed Sep 15 17:07:42 2010 New Revision: 77093 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: id(). Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 17:07:42 2010 @@ -106,12 +106,16 @@ def setup(self): """Called at run-time to initialize the GC.""" + MovingGCBase.setup(self) # assert self.nursery_size > 0, "XXX" # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() # + # Support for id() + self.young_objects_with_id = self.AddressDict() + # # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). @@ -256,6 +260,9 @@ flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_NO_YOUNG_PTRS self.init_gc_object(addr, typeid16, flags) + def _can_never_move(self, obj): + return False # approximate good enough answer for id() + def is_in_nursery(self, addr): ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, "odd-valued (i.e. 
tagged) pointer unexpected here") @@ -264,6 +271,10 @@ def is_forwarded_marker(self, tid): return isinstance(tid, int) and tid == FORWARDED_MARKER + def get_forwarding_address(self, obj): + obj = llarena.getfakearenaaddress(obj) + return obj.address[0] + def debug_check_object(self, obj): # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), @@ -354,6 +365,11 @@ # We proceed until 'old_objects_pointing_to_young' is empty. self.collect_oldrefs_to_nursery() # + # Update the id tracking of any object that was moved out of + # the nursery. + if self.young_objects_with_id.length() > 0: + self.update_young_objects_with_id() + # # Now all live nursery objects should be out, and the rest dies. # Fill the whole nursery with zero and reset the current nursery # pointer. @@ -407,8 +423,7 @@ # # If 'obj' was already forwarded, change it to its forwarding address. if self.is_forwarded_marker(self.header(obj).tid): - obj = llarena.getfakearenaaddress(obj) - root.address[0] = obj.address[0] + root.address[0] = self.get_forwarding_address(obj) #print '(already forwarded)' return # @@ -465,6 +480,10 @@ self.collect_roots() self.visit_all_objects() # + # Walk the 'objects_with_id' list and remove the ones that die, + # i.e. that don't have the GCFLAG_VISITED flag. + self.update_objects_with_id() + # # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. self.free_unvisited_rawmalloc_objects() @@ -556,6 +575,61 @@ self.trace(obj, self._collect_ref, None) + # ---------- + # id() support + + def id(self, ptr): + """Implement id() of an object, given as a GCREF.""" + obj = llmemory.cast_ptr_to_adr(ptr) + # + # Is it a tagged pointer? For them, the result is odd-valued. + if not self.is_valid_gc_object(obj): + return llmemory.cast_adr_to_int(obj) + # + # Is the object still in the nursery? + if self.is_in_nursery(obj): + result = self.young_objects_with_id.get(obj) + if not result: + result = self._next_id() + self.young_objects_with_id.setitem(obj, result) + else: + result = self.objects_with_id.get(obj) + if not result: + # An 'obj' not in the nursery and not in 'objects_with_id' + # did not have its id() asked for and will not move any more, + # so we can just return its address as the result. + return llmemory.cast_adr_to_int(obj) + # + # If we reach here, 'result' is an odd number. If we double it, + # we have a number of the form 4n+2, which cannot collide with + # tagged pointers nor with any real address. + return llmemory.cast_adr_to_int(result) * 2 + + + def update_young_objects_with_id(self): + # Called during a minor collection. + self.young_objects_with_id.foreach(self._update_object_id, + self.objects_with_id) + self.young_objects_with_id.clear() + # NB. the clear() also makes the dictionary shrink back to its + # minimal size, which is actually a good idea: a large, mostly-empty + # table is bad for the next call to 'foreach'. 
+ + def _update_object_id(self, obj, id, new_objects_with_id): + if self.is_forwarded_marker(self.header(obj).tid): + newobj = self.get_forwarding_address(obj) + new_objects_with_id.setitem(newobj, id) + else: + self.id_free_list.append(id) + + def _update_object_id_FAST(self, obj, id, new_objects_with_id): + # overrides the parent's version (a bit hackish) + if self.header(obj).tid & GCFLAG_VISITED: + new_objects_with_id.insertclean(obj, id) + else: + self.id_free_list.append(id) + + # ____________________________________________________________ # For testing, a simple implementation of ArenaCollection. From cfbolz at codespeak.net Wed Sep 15 17:22:14 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 15 Sep 2010 17:22:14 +0200 (CEST) Subject: [pypy-svn] r77094 - in pypy/branch/better-map-instances/pypy/objspace/std: . test Message-ID: <20100915152214.AF05A282C16@codespeak.net> Author: cfbolz Date: Wed Sep 15 17:22:13 2010 New Revision: 77094 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: fix typo Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Wed Sep 15 17:22:13 2010 @@ -452,7 +452,7 @@ if i < len_storage: erased = rerased.erase(storage[i]) else: - erased = rerased.erased(None) + erased = rerased.erase(None) setattr(self, "_value%s" % i, erased) if len_storage < n: erased = rerased.erase(None) Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Wed Sep 15 17:22:13 2010 @@ -393,6 +393,14 @@ assert obj.getdictvalue(space, "a") is w2 assert obj.getdictvalue(space, "b") is w4 assert obj.getdictvalue(space, "c") is None + abmap = obj.map + + res = obj.deldictvalue(space, "a") + assert res + assert rerased.unerase(obj._value0, W_Root) is w4 + assert obj.getdictvalue(space, "a") is None + assert obj.getdictvalue(space, "b") is w4 + assert obj.getdictvalue(space, "c") is None obj2 = objectcls() obj2.user_setup(space, cls) @@ -400,7 +408,7 @@ obj2.setdictvalue(space, "b", w6) assert obj2.getdictvalue(space, "a") is w5 assert obj2.getdictvalue(space, "b") is w6 - assert obj2.map is obj.map + assert obj2.map is abmap # ___________________________________________________________ From arigo at codespeak.net Wed Sep 15 17:39:47 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 17:39:47 +0200 (CEST) Subject: [pypy-svn] r77095 - in pypy/branch/gen2-gc/pypy/rpython: lltypesystem memory/gc Message-ID: <20100915153947.7E173282BE9@codespeak.net> Author: arigo Date: Wed Sep 15 17:39:45 2010 New Revision: 77095 Modified: pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: Implement identityhash(). 
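The patch below implements identityhash() for the moving minimark GC without reserving a hash field in every object: while an object is still in the nursery its hash is simply derived from its current address plus a per-nursery base, and GCFLAG_HASHFIELD records that the hash was asked for; only when such an object is copied out of the nursery does it grow one extra word holding that value, so the hash stays stable afterwards. A minimal pure-Python sketch of the idea (not part of the patch; all names are illustrative and only the nursery case is modelled):

    class FakeObj(object):
        def __init__(self, addr):
            self.addr = addr            # pretend address inside the nursery
            self.hash_requested = False # plays the role of GCFLAG_HASHFIELD
            self.stored_hash = None     # the extra word, written when moving

    def identityhash(obj, nursery_hash_base):
        if obj.stored_hash is not None:       # already moved: read the extra word
            return obj.stored_hash
        obj.hash_requested = True             # remember to store it when moving
        return obj.addr + nursery_hash_base   # cheap, address-based hash

    def move_out_of_nursery(obj, new_addr, nursery_hash_base):
        if obj.hash_requested:
            # write out the value identityhash() already handed out
            obj.stored_hash = obj.addr + nursery_hash_base
        obj.addr = new_addr

    o = FakeObj(160)
    h = identityhash(o, nursery_hash_base=7)
    move_out_of_nursery(o, new_addr=4096, nursery_hash_base=7)
    assert identityhash(o, nursery_hash_base=123) == h   # stable across the move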
Modified: pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/lltypesystem/llarena.py Wed Sep 15 17:39:45 2010 @@ -303,6 +303,7 @@ assert isinstance(arena_addr, fakearenaaddress) assert arena_addr.offset == 0 arena_addr.arena.reset(False) + assert not arena_addr.arena.objectptrs arena_addr.arena.freed = True def arena_reset(arena_addr, size, zero): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 17:39:45 2010 @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import MovingGCBase from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE -from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.rlib.debug import ll_assert, debug_print WORD = LONG_BIT // 8 @@ -25,6 +25,11 @@ # The following flag is set on surviving objects during a major collection. GCFLAG_VISITED = first_gcflag << 2 +# The following flag is set on objects that have an extra hash field, +# except on nursery objects, where it means that it *will* grow a hash +# field when moving. +GCFLAG_HASHFIELD = first_gcflag << 3 + # Marker set to 'tid' during a minor collection when an object from # the nursery was forwarded. FORWARDED_MARKER = -1 @@ -85,6 +90,7 @@ MovingGCBase.__init__(self, config, chunk_size) self.nursery_size = nursery_size self.small_request_threshold = small_request_threshold + self.nursery_hash_base = -1 # # The ArenaCollection() handles the nonmovable objects allocation. if ArenaCollectionClass is None: @@ -376,6 +382,7 @@ llarena.arena_reset(self.nursery, self.nursery_size, 2) self.nursery_next = self.nursery # + self.change_nursery_hash_base() self.debug_check_consistency() # XXX expensive! @@ -428,17 +435,26 @@ return # # First visit to 'obj': we must move it out of the nursery. - # Allocate a new nonmovable location for it. size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) totalsize = size_gc_header + size - newhdr = self.ac.malloc(totalsize) + totalsize_incl_hash = totalsize + if self.header(obj).tid & GCFLAG_HASHFIELD: + totalsize_incl_hash += llmemory.sizeof(lltype.Signed) + # + # Allocate a new nonmovable location for it. + newhdr = self.ac.malloc(totalsize_incl_hash) newobj = newhdr + size_gc_header # # Copy it. Note that references to other objects in the # nursery are kept unchanged in this step. llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize) # + # Write the hash field too, if necessary. + if self.header(obj).tid & GCFLAG_HASHFIELD: + hash = self._compute_current_nursery_hash(obj) + (newhdr + (size_gc_header + size)).signed[0] = hash + # # Set the old object's tid to FORWARDED_MARKER and replace # the old object's content with the target address. # A bit of no-ops to convince llarena that we are changing @@ -578,9 +594,9 @@ # ---------- # id() support - def id(self, ptr): + def id(self, gcobj): """Implement id() of an object, given as a GCREF.""" - obj = llmemory.cast_ptr_to_adr(ptr) + obj = llmemory.cast_ptr_to_adr(gcobj) # # Is it a tagged pointer? For them, the result is odd-valued. 
if not self.is_valid_gc_object(obj): @@ -630,6 +646,46 @@ self.id_free_list.append(id) + # ---------- + # identityhash() support + + def identityhash(self, gcobj): + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_in_nursery(obj): + # + # A nursery object's identityhash is never stored with the + # object, but returned by _compute_current_nursery_hash(). + # But we must set the GCFLAG_HASHFIELD to remember that + # we will have to store it into the object when it moves. + self.header(obj).tid |= GCFLAG_HASHFIELD + return self._compute_current_nursery_hash(obj) + # + if self.header(obj).tid & GCFLAG_HASHFIELD: + # + # An non-moving object with a hash field. + objsize = self.get_size(obj) + obj = llarena.getfakearenaaddress(obj) + return (obj + objsize).signed[0] + # + # No hash field needed. + return llmemory.cast_adr_to_int(obj) + + + def change_nursery_hash_base(self): + # The following should be enough to ensure that young objects + # tend to always get a different hash. It also makes sure that + # nursery_hash_base is not a multiple of WORD, to avoid collisions + # with the hash of non-young objects. + hash_base = self.nursery_hash_base + hash_base += self.nursery_size - 1 + if (hash_base & (WORD-1)) == 0: + hash_base -= 1 + self.nursery_hash_base = intmask(hash_base) + + def _compute_current_nursery_hash(self, obj): + return intmask(llmemory.cast_adr_to_int(obj) + self.nursery_hash_base) + + # ____________________________________________________________ # For testing, a simple implementation of ArenaCollection. @@ -651,7 +707,17 @@ ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") # - result = llmemory.raw_malloc(size) + result = llarena.arena_malloc(nsize, False) + # + # Custom hack for the hash + if (isinstance(size, llmemory.CompositeOffset) and + isinstance(size.offsets[-1], llmemory.ItemOffset) and + size.offsets[-1].TYPE == lltype.Signed): + size_of_int = llmemory.sizeof(lltype.Signed) + size = sum(size.offsets[1:-1], size.offsets[0]) + llarena.arena_reserve(result + size, size_of_int) + # + llarena.arena_reserve(result, size) self.all_objects.append(result) return result @@ -660,6 +726,6 @@ self.all_objects = [] for rawobj in objs: if ok_to_free_func(rawobj): - llmemory.raw_free(rawobj) + llarena.arena_free(rawobj) else: self.all_objects.append(rawobj) From arigo at codespeak.net Wed Sep 15 18:09:29 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 18:09:29 +0200 (CEST) Subject: [pypy-svn] r77096 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915160929.E2D03282C18@codespeak.net> Author: arigo Date: Wed Sep 15 18:09:28 2010 New Revision: 77096 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Log: Bug fix with test. 
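The fix below appears to tie in with the hash field from r77095: by sending every object whose totalsize is greater than or equal to small_request_threshold to raw_malloc(), everything that reaches the compact ArenaCollection is strictly smaller than the threshold, so appending one extra word for a lazily stored hash still fits in a threshold-sized block. As a worked example (assuming a 64-bit build, where WORD is 8, and the default small_request_threshold of 32*WORD = 256 bytes): the largest compactly allocated object is now 248 bytes, and 248 + 8 = 256 still fits, whereas with the previous strict '>' comparison a 256-byte object whose hash had been asked for would have needed 264 bytes.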
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Wed Sep 15 18:09:28 2010 @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import MovingGCBase +from pypy.rpython.memory.gc import minimarkpage from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.rlib.debug import ll_assert, debug_print @@ -77,7 +78,7 @@ "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger are just allocated with raw_malloc(). + # that are larger or equal are just allocated with raw_malloc(). "small_request_threshold": 32*WORD, } @@ -85,7 +86,7 @@ nursery_size=32*WORD, page_size=16*WORD, arena_size=64*WORD, - small_request_threshold=5*WORD, + small_request_threshold=6*WORD, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) self.nursery_size = nursery_size @@ -94,8 +95,7 @@ # # The ArenaCollection() handles the nonmovable objects allocation. if ArenaCollectionClass is None: - from pypy.rpython.memory.gc.minimarkpage import ArenaCollection - ArenaCollectionClass = ArenaCollection + ArenaCollectionClass = minimarkpage.ArenaCollection self.ac = ArenaCollectionClass(arena_size, page_size, small_request_threshold) # @@ -125,7 +125,7 @@ # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). - extra = self.small_request_threshold + extra = self.small_request_threshold - WORD self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) if not self.nursery: raise MemoryError("cannot allocate nursery") @@ -143,9 +143,9 @@ size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. The following check should be constant-folded. - if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + # If totalsize is greater or equal than small_request_threshold, + # ask for a rawmalloc. The following check should be constant-folded. + if llmemory.raw_malloc_usage(totalsize)>=self.small_request_threshold: result = self.external_malloc(typeid, totalsize) # else: @@ -182,7 +182,7 @@ # # If totalsize is greater than small_request_threshold, ask for # a rawmalloc. - if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + if llmemory.raw_malloc_usage(totalsize)>=self.small_request_threshold: result = self.external_malloc(typeid, totalsize) # else: @@ -443,6 +443,8 @@ totalsize_incl_hash += llmemory.sizeof(lltype.Signed) # # Allocate a new nonmovable location for it. + # Note that 'totalsize' must be < small_request_threshold, so + # 'totalsize_incl_hash <= small_request_threshold'. 
newhdr = self.ac.malloc(totalsize_incl_hash) newobj = newhdr + size_gc_header # @@ -709,15 +711,7 @@ # result = llarena.arena_malloc(nsize, False) # - # Custom hack for the hash - if (isinstance(size, llmemory.CompositeOffset) and - isinstance(size.offsets[-1], llmemory.ItemOffset) and - size.offsets[-1].TYPE == lltype.Signed): - size_of_int = llmemory.sizeof(lltype.Signed) - size = sum(size.offsets[1:-1], size.offsets[0]) - llarena.arena_reserve(result + size, size_of_int) - # - llarena.arena_reserve(result, size) + minimarkpage.reserve_with_hash(result, size) self.all_objects.append(result) return result Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 18:09:28 2010 @@ -50,6 +50,8 @@ _alloc_flavor_ = "raw" def __init__(self, arena_size, page_size, small_request_threshold): + # 'small_request_threshold' is the largest size that we + # can ask with self.malloc(). self.arena_size = arena_size self.page_size = page_size self.small_request_threshold = small_request_threshold @@ -116,7 +118,7 @@ page.nextpage = self.full_page_for_size[size_class] self.full_page_for_size[size_class] = page # - llarena.arena_reserve(result, _dummy_size(size)) + reserve_with_hash(result, _dummy_size(size)) return result @@ -337,3 +339,16 @@ if isinstance(size, int): size = llmemory.sizeof(lltype.Char) * size return size + +def reserve_with_hash(result, size): + # XXX translation + # + # Custom hack for the hash + if (isinstance(size, llmemory.CompositeOffset) and + isinstance(size.offsets[-1], llmemory.ItemOffset) and + size.offsets[-1].TYPE == lltype.Signed): + size_of_int = llmemory.sizeof(lltype.Signed) + size = sum(size.offsets[1:-1], size.offsets[0]) + llarena.arena_reserve(result + size, size_of_int) + # + llarena.arena_reserve(result, size) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Wed Sep 15 18:09:28 2010 @@ -326,6 +326,15 @@ self.gc.collect() assert hash == self.gc.identityhash(self.stackroots[-1]) self.stackroots.pop() + # (6) ask for the hash of varsized objects, larger and larger + for i in range(10): + self.gc.collect() + p = self.malloc(VAR, i) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass From fijall at gmail.com Wed Sep 15 18:18:05 2010 From: fijall at gmail.com (Maciej Fijalkowski) Date: Wed, 15 Sep 2010 18:18:05 +0200 Subject: [pypy-svn] r77083 - pypy/branch/jitffi In-Reply-To: <20100915110721.C94F0282C16@codespeak.net> References: <20100915110721.C94F0282C16@codespeak.net> Message-ID: Hey anto. There was a SoC about that, I guess it would be good to chat about it at least (personally I think jitting rlib/libffi is exactly bad layer to be jitted and some experiments were done). Cheers, fijal On Wed, Sep 15, 2010 at 1:07 PM, wrote: > Author: antocuni > Date: Wed Sep 15 13:07:20 2010 > New Revision: 77083 > > Added: > ? pypy/branch/jitffi/ ? (props changed) > ? ? 
?- copied from r77082, pypy/trunk/ > Log: > a branch in which to try to jit rlib/libffi.py > > > _______________________________________________ > pypy-svn mailing list > pypy-svn at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-svn > From arigo at codespeak.net Wed Sep 15 18:27:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 15 Sep 2010 18:27:06 +0200 (CEST) Subject: [pypy-svn] r77097 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test Message-ID: <20100915162706.9C8C5282BE9@codespeak.net> Author: arigo Date: Wed Sep 15 18:27:04 2010 New Revision: 77097 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Track the number of pages that are currently in use. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 18:27:04 2010 @@ -74,6 +74,11 @@ self.uninitialized_pages = PAGE_NULL self.num_uninitialized_pages = 0 self.free_pages = NULL + self.used_pages = 0 # number of pages at least partially filled + + + def pages_in_use(self): + return self.used_pages def malloc(self, size): @@ -150,6 +155,7 @@ ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") self.page_for_size[size_class] = result + self.used_pages += 1 return result @@ -258,6 +264,7 @@ llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) pageaddr.address[0] = self.free_pages self.free_pages = pageaddr + self.used_pages -= 1 def walk_page(self, page, block_size, nblocks, ok_to_free_func): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 18:27:04 2010 @@ -39,23 +39,27 @@ # ac = ArenaCollection(arenasize, pagesize, 99) assert ac.num_uninitialized_pages == 0 + assert ac.used_pages == 0 # page = ac.allocate_new_page(5) checknewpage(page, 5) assert ac.num_uninitialized_pages == 2 assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[5] == page + assert ac.used_pages == 1 # page = ac.allocate_new_page(3) checknewpage(page, 3) assert ac.num_uninitialized_pages == 1 assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[3] == page + assert ac.used_pages == 2 # page = ac.allocate_new_page(4) checknewpage(page, 4) assert ac.num_uninitialized_pages == 0 assert ac.page_for_size[4] == page + assert ac.used_pages == 3 def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False): @@ -389,7 +393,8 @@ def test_random(): import random pagesize = hdrsize + 24*WORD - ac = arena_collection_for_test(pagesize, " " * 28) + num_pages = 28 + ac = arena_collection_for_test(pagesize, " " * num_pages) live_objects = {} # # Run the test until ac.allocate_new_arena() is called. @@ -422,3 +427,4 @@ except DoneTesting: # the following output looks cool on a 112-character-wide terminal. 
print ac._startpageaddr.arena.usagemap + assert ac.used_pages == num_pages From cfbolz at codespeak.net Wed Sep 15 19:04:31 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 15 Sep 2010 19:04:31 +0200 (CEST) Subject: [pypy-svn] r77098 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100915170431.4071E282BE9@codespeak.net> Author: cfbolz Date: Wed Sep 15 19:04:28 2010 New Revision: 77098 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/objspace.py Log: need to specialize this Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Wed Sep 15 19:04:28 2010 @@ -375,7 +375,7 @@ class Object(ObjectMixin, BaseMapdictObject): pass # mainly for tests -def get_subclass_of_correct_size(space, supercls, w_type): +def get_subclass_of_correct_size(space, cls, supercls, w_type): assert space.config.objspace.std.withmapdict map = w_type.terminator classes = memo_get_subclass_of_correct_size(space, supercls) @@ -386,6 +386,7 @@ return classes[size - 1] except IndexError: return classes[-1] +get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" def memo_get_subclass_of_correct_size(space, supercls): Modified: pypy/branch/better-map-instances/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/objspace.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/objspace.py Wed Sep 15 19:04:28 2010 @@ -323,7 +323,7 @@ w_subtype.needsdel, w_subtype.weakrefable) if self.config.objspace.std.withmapdict: from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, subcls, w_subtype) + subcls = get_subclass_of_correct_size(self, cls, subcls, w_subtype) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) From afa at codespeak.net Wed Sep 15 19:37:30 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 15 Sep 2010 19:37:30 +0200 (CEST) Subject: [pypy-svn] r77099 - in pypy/branch/fast-forward/pypy: module/_multiprocessing module/_multiprocessing/test rlib Message-ID: <20100915173730.E70E5282BE9@codespeak.net> Author: afa Date: Wed Sep 15 19:37:29 2010 New Revision: 77099 Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/ (props changed) pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py (contents, props changed) pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (contents, props changed) pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py (contents, props changed) pypy/branch/fast-forward/pypy/module/_multiprocessing/test/ (props changed) pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py (contents, props changed) Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py Log: Add the beginning of a _multiprocessing module Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py Wed Sep 15 19:37:29 2010 @@ -0,0 +1,14 @@ +from pypy.interpreter.mixedmodule import 
MixedModule +import sys + +class Module(MixedModule): + + interpleveldefs = { + 'Connection' : 'interp_connection.W_SocketConnection', + } + + appleveldefs = { + } + + if sys.platform == 'win32': + interpleveldefs['win32'] = 'interp_win32.win32_namespace(space)' Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Wed Sep 15 19:37:29 2010 @@ -0,0 +1,43 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef, GetSetProperty + +INVALID_HANDLE_VALUE = -1 +READABLE = 1 +WRITABLE = 2 + + +class W_BaseConnection(Wrappable): + def __init__(self, handle, flags): + self.handle = handle + self.flags = flags + + def descr_repr(self, space): + conn_type = ["read-only", "write-only", "read-write"][self.flags] + + return space.wrap("<%s %s, handle %zd>" % ( + conn_type, space.type(self).getname(space, '?'), self.handle)) + + def close(self): + if self.handle != INVALID_HANDLE_VALUE: + self.do_close() + self.handle = INVALID_HANDLE_VALUE + + def __del__(self): + self.close() + + def closed_get(space, self): + return space.w_bool(self.handle == INVALID_HANDLE_VALUE) + def readable_get(space, self): + return space.w_bool(self.flags & READABLE) + def writable_get(space, self): + return space.w_bool(self.flags & WRITABLE) + +class W_SocketConnection(W_BaseConnection): + pass + +W_SocketConnection.typedef = TypeDef( + 'Connection', + closed = GetSetProperty(W_BaseConnection.closed_get), + readable = GetSetProperty(W_BaseConnection.readable_get), + writable = GetSetProperty(W_BaseConnection.writable_get), +) Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Wed Sep 15 19:37:29 2010 @@ -0,0 +1,30 @@ +from pypy.interpreter import gateway +from pypy.interpreter.function import StaticMethod +from pypy.interpreter.error import wrap_windowserror +from pypy.rlib import rwin32 +from pypy.rpython.lltypesystem import rffi + +def handle_w(space, w_handle): + return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) + +def CloseHandle(space, w_handle): + handle = handle_w(space, w_handle) + if not rwin32.CloseHandle(handle): + raise wrap_windowserror(space, rwin32.lastWindowsError()) + +def win32_namespace(space): + "NOT_RPYTHON" + w_win32 = space.call_function(space.w_type, + space.wrap("win32"), + space.newtuple([]), + space.newdict()) + try: + for name in ['CloseHandle', + ]: + function = globals()[name] + w_function = space.wrap(gateway.interp2app(function)) + w_method = space.wrap(StaticMethod(w_function)) + space.setattr(w_win32, space.wrap(name), w_method) + except Exception, e: + import pdb; pdb.set_trace() + return w_win32 Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py Wed Sep 15 19:37:29 2010 @@ -0,0 +1,13 @@ +import py +import sys +from pypy.conftest import gettestobjspace + +class AppTestWin32: + def setup_class(cls): + if sys.platform != "win32": + py.test.skip("win32 only") + cls.space = gettestobjspace(usemodules=('_multiprocessing',)) + + def test_CloseHandle(self): + 
import _multiprocessing + raises(WindowsError, _multiprocessing.win32.CloseHandle, -1) Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rwin32.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rwin32.py Wed Sep 15 19:37:29 2010 @@ -102,7 +102,7 @@ FreeLibrary = winexternal('FreeLibrary', [rffi.VOIDP], BOOL) LocalFree = winexternal('LocalFree', [HLOCAL], DWORD) - CloseHandle = winexternal('CloseHandle', [HANDLE], lltype.Void) + CloseHandle = winexternal('CloseHandle', [HANDLE], BOOL) FormatMessage = winexternal( 'FormatMessageA', From cfbolz at codespeak.net Wed Sep 15 19:51:35 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Wed, 15 Sep 2010 19:51:35 +0200 (CEST) Subject: [pypy-svn] r77100 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100915175135.B2F3C282BE9@codespeak.net> Author: cfbolz Date: Wed Sep 15 19:51:34 2010 New Revision: 77100 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: checking in a hack to test a theory on tannit Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Wed Sep 15 19:51:34 2010 @@ -403,6 +403,8 @@ _subclass_cache = {} def _make_subclass_size_n(supercls, n): + if supercls._init_empty.im_func is not BaseMapdictObject._init_empty.im_func: + return supercls from pypy.rlib import unroll, rerased rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 From hakanardo at codespeak.net Thu Sep 16 07:27:48 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 16 Sep 2010 07:27:48 +0200 (CEST) Subject: [pypy-svn] r77101 - in pypy/trunk/pypy: jit/tl module/__builtin__ module/__builtin__/test module/pypyjit/test Message-ID: <20100916052748.2421F282C23@codespeak.net> Author: hakanardo Date: Thu Sep 16 07:27:46 2010 New Revision: 77101 Modified: pypy/trunk/pypy/jit/tl/pypyjit_demo.py pypy/trunk/pypy/module/__builtin__/functional.py pypy/trunk/pypy/module/__builtin__/test/test_minmax.py pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: Allow jit to unroll calls to max() and min() with more than one argument. 
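The change below lets the JIT unroll the comparison loop when max() or min() receives several positional arguments, while a single iterable argument keeps going through the generic loop. A small application-level sketch of the control flow (not the actual interp-level code from the patch; all names are illustrative):

    def _minmax_unrollable(args, better):
        # fixed, small number of arguments: a tracing JIT can unroll this
        best = args[0]
        for item in args[1:]:
            if better(item, best):
                best = item
        return best

    def _minmax_generic(iterable, better):
        # unknown length: stays a real loop at runtime
        it = iter(iterable)
        best = next(it)
        for item in it:
            if better(item, best):
                best = item
        return best

    def my_max(*args):
        if len(args) > 1:                # like the args_w fast path below
            return _minmax_unrollable(args, lambda a, b: a > b)
        return _minmax_generic(args[0], lambda a, b: a > b)

    assert my_max(3, 1, 2) == 3          # unrollable: max(a, b, c)
    assert my_max([3, 1, 2]) == 3        # generic: max(seq)

A later commit in this same batch (r77104) additionally caps the unrolled path at fewer than ten arguments.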
Modified: pypy/trunk/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/trunk/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/trunk/pypy/jit/tl/pypyjit_demo.py Thu Sep 16 07:27:46 2010 @@ -39,16 +39,24 @@ try: from array import array + + def coords(w,h): + y = 0 + while y < h: + x = 0 + while x < w: + yield x,y + x += 1 + y += 1 + def f(img): - i=0 sa=0 - while i < img.__len__(): - sa+=img[i] - i+=1 + for x, y in coords(4,4): + sa += x * y return sa - img=array('h',(1,2,3,4)) - print f(img) + #img=array('h',(1,2,3,4)) + print f(3) except Exception, e: print "Exception: ", type(e) print e Modified: pypy/trunk/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/functional.py (original) +++ pypy/trunk/pypy/module/__builtin__/functional.py Thu Sep 16 07:27:46 2010 @@ -13,6 +13,7 @@ from pypy.rlib.objectmodel import specialize from pypy.module.__builtin__.app_functional import range as app_range from inspect import getsource, getfile +from pypy.rlib.jit import unroll_safe """ Implementation of the common integer case of range. Instead of handling @@ -96,12 +97,31 @@ return W_RangeListObject(start, step, howmany) + at unroll_safe @specialize.arg(2) def min_max(space, args, implementation_of): if implementation_of == "max": compare = space.gt else: compare = space.lt + + args_w = args.arguments_w + if len(args_w) > 1 and not args.keywords: # Unrollable case + w_max_item = None + for w_item in args_w: + if w_max_item is None or \ + space.is_true(compare(w_item, w_max_item)): + w_max_item = w_item + return w_max_item + else: + return min_max_loop(space, args, implementation_of) + + at specialize.arg(2) +def min_max_loop(space, args, implementation_of): + if implementation_of == "max": + compare = space.gt + else: + compare = space.lt args_w = args.arguments_w if len(args_w) > 1: w_sequence = space.newtuple(args_w) Modified: pypy/trunk/pypy/module/__builtin__/test/test_minmax.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/test/test_minmax.py (original) +++ pypy/trunk/pypy/module/__builtin__/test/test_minmax.py Thu Sep 16 07:27:46 2010 @@ -51,3 +51,37 @@ def test_max_empty(self): raises(ValueError, max, []) + +class AppTestMaxTuple: + + def test_max_usual(self): + assert max((1, 2, 3)) == 3 + + def test_max_floats(self): + assert max((0.1, 2.7, 14.7)) == 14.7 + + def test_max_chars(self): + assert max(('a', 'b', 'c')) == 'c' + + def test_max_strings(self): + assert max(('aaa', 'bbb', 'c')) == 'c' + + def test_max_mixed(self): + assert max(('1', 2, 3, 'aa')) == 'aa' + +class AppTestMinList: + + def test_min_usual(self): + assert min([1, 2, 3]) == 1 + + def test_min_floats(self): + assert min([0.1, 2.7, 14.7]) == 0.1 + + def test_min_chars(self): + assert min(['a', 'b', 'c']) == 'a' + + def test_min_strings(self): + assert min(['aaa', 'bbb', 'c']) == 'aaa' + + def test_min_mixed(self): + assert min(['1', 2, 3, 'aa']) == 2 Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 16 07:27:46 2010 @@ -762,6 +762,8 @@ else: n = 215 + print + print 'Test:', e1, e2, n, res self.run_source(''' class tst: pass @@ -779,6 +781,25 @@ return sa '''%(e1, e2), n, ([], 
res)) + def test_boolrewrite_ptr_single(self): + self.run_source(''' + class tst: + pass + def main(): + a = tst() + b = tst() + c = tst() + sa = 0 + for i in range(1000): + if a == b: sa += 1 + else: sa += 2 + if a != b: sa += 10000 + else: sa += 20000 + if i > 750: a = b + return sa + ''', 215, ([], 12481752)) + assert False + def test_array_sum(self): for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): res = 19352859 @@ -1059,7 +1080,16 @@ ''', 170, ([], 1239690.0)) - + def test_min_max(self): + self.run_source(''' + def main(): + i=0 + sa=0 + while i < 2000: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + ''', 51, ([], 2000*3000)) # test_circular From hakanardo at codespeak.net Thu Sep 16 07:56:10 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 16 Sep 2010 07:56:10 +0200 (CEST) Subject: [pypy-svn] r77102 - pypy/trunk/pypy/module/pypyjit/test Message-ID: <20100916055610.B1BF0282C23@codespeak.net> Author: hakanardo Date: Thu Sep 16 07:56:09 2010 New Revision: 77102 Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: Testing max with a varying number of args Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 16 07:56:09 2010 @@ -1091,6 +1091,28 @@ return sa ''', 51, ([], 2000*3000)) + def test_silly_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(*range(i)) + i+=1 + return sa + ''', 125, ([], 1997001)) + + def test_iter_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(range(i)) + i+=1 + return sa + ''', 0, ([], 1997001)) + # test_circular class AppTestJIT(PyPyCJITTests): From hakanardo at codespeak.net Thu Sep 16 07:59:17 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 16 Sep 2010 07:59:17 +0200 (CEST) Subject: [pypy-svn] r77103 - pypy/trunk/pypy/module/pypyjit/test Message-ID: <20100916055917.BD068282C23@codespeak.net> Author: hakanardo Date: Thu Sep 16 07:59:16 2010 New Revision: 77103 Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: typo Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 16 07:59:16 2010 @@ -1111,7 +1111,7 @@ sa+=max(range(i)) i+=1 return sa - ''', 0, ([], 1997001)) + ''', 88, ([], 1997001)) # test_circular From hakanardo at codespeak.net Thu Sep 16 09:40:52 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 16 Sep 2010 09:40:52 +0200 (CEST) Subject: [pypy-svn] r77104 - pypy/trunk/pypy/module/__builtin__ Message-ID: <20100916074052.CABE5282C23@codespeak.net> Author: hakanardo Date: Thu Sep 16 09:40:50 2010 New Revision: 77104 Modified: pypy/trunk/pypy/module/__builtin__/functional.py Log: Don't unroll too much Modified: pypy/trunk/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/functional.py (original) +++ pypy/trunk/pypy/module/__builtin__/functional.py Thu Sep 16 09:40:50 2010 @@ -106,7 +106,8 @@ compare = space.lt args_w = args.arguments_w - if len(args_w) > 1 and
len(args_w) < 10 and not args.keywords: + # Unrollable case w_max_item = None for w_item in args_w: if w_max_item is None or \ From david at codespeak.net Thu Sep 16 10:53:16 2010 From: david at codespeak.net (david at codespeak.net) Date: Thu, 16 Sep 2010 10:53:16 +0200 (CEST) Subject: [pypy-svn] r77105 - pypy/branch/resoperation-refactoring Message-ID: <20100916085316.5FC26282C28@codespeak.net> Author: david Date: Thu Sep 16 10:53:14 2010 New Revision: 77105 Added: pypy/branch/resoperation-refactoring/ (props changed) - copied from r77104, pypy/trunk/ Log: (david, antocuni, cfbolz) a branch where to refactor resoperations to be more compact. From cfbolz at codespeak.net Thu Sep 16 11:04:50 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 16 Sep 2010 11:04:50 +0200 (CEST) Subject: [pypy-svn] r77106 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100916090450.9D479282C23@codespeak.net> Author: cfbolz Date: Thu Sep 16 11:04:49 2010 New Revision: 77106 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: fix translation Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 16 11:04:49 2010 @@ -403,7 +403,7 @@ _subclass_cache = {} def _make_subclass_size_n(supercls, n): - if supercls._init_empty.im_func is not BaseMapdictObject._init_empty.im_func: + if not hasattr(supercls, "_init_empty"): return supercls from pypy.rlib import unroll, rerased rangen = unroll.unrolling_iterable(range(n)) @@ -585,6 +585,7 @@ INVALID_CACHE_ENTRY = CacheEntry() INVALID_CACHE_ENTRY.map = objectmodel.instantiate(AbstractAttribute) # different from any real map ^^^ +INVALID_CACHE_ENTRY.map.w_cls = None def init_mapdict_cache(pycode): num_entries = len(pycode.co_names_w) From david at codespeak.net Thu Sep 16 13:52:42 2010 From: david at codespeak.net (david at codespeak.net) Date: Thu, 16 Sep 2010 13:52:42 +0200 (CEST) Subject: [pypy-svn] r77107 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/llgraph metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100916115242.2BC47282C23@codespeak.net> Author: david Date: Thu Sep 16 13:52:39 2010 New Revision: 77107 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Log: (david, antocuni) Initial interface changes for resoperation Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py ============================================================================== --- 
pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py Thu Sep 16 13:52:39 2010 @@ -160,7 +160,8 @@ if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].descr = descr - for x in op.args: + for i in range(op.numargs()): + x = op.getarg(i) if isinstance(x, history.Box): llimpl.compile_add_var(c, var2index[x]) elif isinstance(x, history.ConstInt): @@ -280,7 +281,7 @@ def __init__(self, *args, **kwds): BaseCPU.__init__(self, *args, **kwds) self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr') - + def fielddescrof(self, S, fieldname): ofs, size = symbolic.get_field_token(S, fieldname) token = history.getkind(getattr(S, fieldname)) @@ -504,7 +505,7 @@ return ootype.cast_to_object(e) else: return ootype.NULL - + def get_exc_value(self): if llimpl._last_exception: earg = llimpl._last_exception.args[1] @@ -580,7 +581,7 @@ x = descr.callmeth(selfbox, argboxes) # XXX: return None if METH.RESULT is Void return x - + def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) @@ -612,7 +613,7 @@ class KeyManager(object): """ Helper class to convert arbitrary dictionary keys to integers. - """ + """ def __init__(self): self.keys = {} @@ -695,7 +696,7 @@ self.ARRAY = ARRAY = ootype.Array(TYPE) def create(): return boxresult(TYPE, ootype.new(TYPE)) - + def create_array(lengthbox): n = lengthbox.getint() return boxresult(ARRAY, ootype.oonewarray(ARRAY, n)) @@ -757,7 +758,7 @@ obj = objbox.getref(TYPE) value = unwrap(T, valuebox) setattr(obj, fieldname, value) - + self.getfield = getfield self.setfield = setfield self._is_pointer_field = (history.getkind(T) == 'ref') Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Thu Sep 16 13:52:39 2010 @@ -51,7 +51,7 @@ def compile_new_loop(metainterp, old_loop_tokens, greenkey, start): """Try to compile a new loop by closing the current history back to the first operation. - """ + """ history = metainterp.history loop = create_empty_loop(metainterp) loop.greenkey = greenkey @@ -133,7 +133,7 @@ metainterp_sd.profiler.end_backend() if not we_are_translated(): metainterp_sd.stats.compiled() - metainterp_sd.log("compiled new bridge") + metainterp_sd.log("compiled new bridge") # ____________________________________________________________ @@ -177,7 +177,7 @@ class TerminatingLoopToken(LoopToken): terminating = True - + def __init__(self, nargs, finishdescr): self.specnodes = [prebuiltNotSpecNode]*nargs self.finishdescr = finishdescr @@ -508,7 +508,7 @@ def compile_new_bridge(metainterp, old_loop_tokens, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. - """ + """ # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. # @@ -546,7 +546,8 @@ # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. 
a FINISH descr = target_loop_token.finishdescr - new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) + args = [op.getarg(i) for i in range(op.numargs())] + new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op # ____________________________________________________________ Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Thu Sep 16 13:52:39 2010 @@ -532,7 +532,7 @@ class BoxFloat(Box): type = FLOAT _attrs_ = ('value',) - + def __init__(self, floatval=0.0): assert isinstance(floatval, float) self.value = floatval @@ -759,12 +759,13 @@ assert len(seen) == len(inputargs), ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) - + @staticmethod def check_consistency_of_branch(operations, seen): "NOT_RPYTHON" for op in operations: - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) if isinstance(box, Box): assert box in seen if op.is_guard(): @@ -885,7 +886,7 @@ self.aborted_count += 1 def entered(self): - self.enter_count += 1 + self.enter_count += 1 def compiled(self): self.compiled_count += 1 @@ -898,7 +899,7 @@ def add_new_loop(self, loop): self.loops.append(loop) - + # test read interface def get_all_loops(self): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py Thu Sep 16 13:52:39 2010 @@ -80,10 +80,10 @@ for i in range(len(operations)): op = operations[i] if op.opnum == rop.DEBUG_MERGE_POINT: - loc = op.args[0]._get_str() + loc = op.getarg(0)._get_str() debug_print("debug_merge_point('%s')" % (loc,)) continue - args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args]) + args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())]) if op.result is not None: res = self.repr_of_arg(memo, op.result) + " = " else: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py Thu Sep 16 13:52:39 2010 @@ -154,7 +154,8 @@ def find_nodes_default(self, op): if op.is_always_pure(): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: @@ -164,8 +165,8 @@ op.opnum, argboxes, op.descr) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping - for box in op.args: - self.getnode(box).mark_escaped() + for i in range(op.numargs()): + self.getnode(op.getarg(i)).mark_escaped() def find_nodes_no_escape(self, op): pass # for operations that don't escape their arguments @@ -178,7 +179,7 @@ def find_nodes_NEW_WITH_VTABLE(self, op): instnode = InstanceNode() - box = op.args[0] + box = op.getarg(0) assert isinstance(box, Const) instnode.knownclsbox = box self.nodes[op.result] = instnode @@ -189,7 +190,7 @@ self.nodes[op.result] = instnode def find_nodes_NEW_ARRAY(self, op): 
- lengthbox = op.args[0] + lengthbox = op.getarg(0) lengthbox = self.get_constant_box(lengthbox) if lengthbox is None: return # var-sized arrays are not virtual @@ -199,28 +200,28 @@ self.nodes[op.result] = arraynode def find_nodes_ARRAYLEN_GC(self, op): - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.arraydescr is not None: resbox = ConstInt(arraynode.arraysize) self.set_constant_node(op.result, resbox) def find_nodes_GUARD_CLASS(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownclsbox = box def find_nodes_GUARD_VALUE(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownvaluebox = box def find_nodes_SETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) - fieldnode = self.getnode(op.args[1]) + instnode = self.getnode(op.getarg(0)) + fieldnode = self.getnode(op.getarg(1)) if instnode.escaped: fieldnode.mark_escaped() return # nothing to be gained from tracking the field @@ -232,7 +233,7 @@ instnode.add_escape_dependency(fieldnode) def find_nodes_GETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.escaped: return # nothing to be gained from tracking the field field = op.descr @@ -254,13 +255,13 @@ find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC def find_nodes_SETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) - itemnode = self.getnode(op.args[2]) + arraynode = self.getnode(op.getarg(0)) + itemnode = self.getnode(op.getarg(2)) if arraynode.escaped: itemnode.mark_escaped() return # nothing to be gained from tracking the item @@ -270,12 +271,12 @@ arraynode.add_escape_dependency(itemnode) def find_nodes_GETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.escaped: return # nothing to be gained from tracking the item index = indexbox.getint() @@ -298,13 +299,15 @@ def find_nodes_JUMP(self, op): # only set up the 'unique' field of the InstanceNodes; # real handling comes later (build_result_specnodes() for loops). - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).set_unique_nodes() def find_nodes_FINISH(self, op): # only for bridges, and only for the ones that end in a 'return' # or 'raise'; all other cases end with a JUMP. - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).unique = UNIQUE_NO find_nodes_ops = _findall(NodeFinder, 'find_nodes_') @@ -344,13 +347,13 @@ # computed by NodeFinder.find_nodes(). 
op = loop.operations[-1] assert op.opnum == rop.JUMP - assert len(self.inputnodes) == len(op.args) + assert len(self.inputnodes) == op.numargs() while True: self.restart_needed = False specnodes = [] - for i in range(len(op.args)): + for i in range(op.numargs()): inputnode = self.inputnodes[i] - exitnode = self.getnode(op.args[i]) + exitnode = self.getnode(op.getarg(i)) specnodes.append(self.intersect(inputnode, exitnode)) if not self.restart_needed: break @@ -562,9 +565,9 @@ def bridge_matches(self, nextloop_specnodes): jump_op = self.jump_op - assert len(jump_op.args) == len(nextloop_specnodes) + assert jump_op.numargs() == len(nextloop_specnodes) for i in range(len(nextloop_specnodes)): - exitnode = self.getnode(jump_op.args[i]) + exitnode = self.getnode(jump_op.getarg(i)) if not nextloop_specnodes[i].matches_instance_node(exitnode): return False return True Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 16 13:52:39 2010 @@ -25,7 +25,7 @@ b = v.intbound if b.has_lower and b.has_upper and b.lower == b.upper: v.make_constant(ConstInt(b.lower)) - + try: op = self.optimizer.producer[box] except KeyError: @@ -35,19 +35,19 @@ if opnum == value: func(self, op) break - + def optimize_GUARD_TRUE(self, op): self.emit_operation(op) - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) - + r = self.getvalue(op.result) if v2.is_constant(): val = v2.box.getint() @@ -57,31 +57,31 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0,val)) - + def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) - + def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) def optimize_INT_ADD_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ self.nextop().opnum == rop.GUARD_NO_OVERFLOW: @@ -93,10 +93,10 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_SUB_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = 
v1.intbound.sub_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ self.nextop().opnum == rop.GUARD_NO_OVERFLOW: @@ -110,8 +110,8 @@ r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ self.nextop().opnum == rop.GUARD_NO_OVERFLOW: @@ -123,10 +123,10 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_LT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_ge(v2.intbound): @@ -135,8 +135,8 @@ self.emit_operation(op) def optimize_INT_GT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_le(v2.intbound): @@ -145,8 +145,8 @@ self.emit_operation(op) def optimize_INT_LE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_le(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): @@ -155,8 +155,8 @@ self.emit_operation(op) def optimize_INT_GE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_ge(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -165,134 +165,134 @@ self.emit_operation(op) def optimize_INT_EQ(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) - else: + else: self.emit_operation(op) - + def optimize_INT_NE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - else: + else: self.emit_operation(op) - - def make_int_lt(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + + def make_int_lt(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_gt(v1.intbound): - self.propagate_bounds_backward(args[1]) - + self.propagate_bounds_backward(box2) + - def make_int_le(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_le(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_le(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_ge(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - def make_int_gt(self, args): - self.make_int_lt([args[1], args[0]]) + 
def make_int_gt(self, box1, box2): + self.make_int_lt(box2, box1) - def make_int_ge(self, args): - self.make_int_le([args[1], args[0]]) + def make_int_ge(self, box1, box2): + self.make_int_le(box2, box1) def propagate_bounds_INT_LT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) else: - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) else: - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_LE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) else: - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) else: - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_EQ(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_NE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_0): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = 
self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB @@ -300,4 +300,4 @@ optimize_ops = _findall(OptIntBounds, 'optimize_') propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_') - + Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 16 13:52:39 2010 @@ -16,12 +16,12 @@ LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays -LEVEL_CONSTANT = '\x03' +LEVEL_CONSTANT = '\x03' import sys MAXINT = sys.maxint MININT = -sys.maxint - 1 - + class OptValue(object): _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -36,7 +36,7 @@ if isinstance(box, Const): self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT - + def force_box(self): return self.box @@ -171,7 +171,7 @@ def new_const_item(self, arraydescr): return self.optimizer.new_const_item(arraydescr) - + def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op @@ -184,7 +184,7 @@ def setup(self, virtuals): pass - + class Optimizer(Optimization): def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True): @@ -199,7 +199,7 @@ self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] - + if len(optimizations) == 0: self.first_optimization = self else: @@ -323,11 +323,11 @@ self._emit_operation(op) def _emit_operation(self, op): - for i in range(len(op.args)): - arg = op.args[i] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: box = self.values[arg].force_box() - op.args[i] = box + op.setarg(i, box) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) @@ -368,14 +368,16 @@ descr.make_a_counter_per_value(op) def make_args_key(self, op): - args = op.args[:] - for i in range(len(args)): - arg = args[i] + args = [] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: - args[i] = self.values[arg].get_key_box() + args.append(self.values[arg].get_key_box()) + else: + args.append(arg) args.append(ConstInt(op.opnum)) return args - + def optimize_default(self, op): canfold = op.is_always_pure() is_ovf = op.is_ovf() @@ -383,8 +385,8 @@ nextop = self.loop.operations[self.i + 1] canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW if canfold: - for arg in op.args: - if self.get_constant_box(arg) is None: + for i in range(op.numargs()): + if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 16 13:52:39 2010 @@ -67,16 +67,16 @@ return False def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) else: self.emit_operation(op) def optimize_INT_OR(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null(): self.make_equal_to(op.result, v2) elif v2.is_null(): @@ -85,20 +85,20 @@ self.emit_operation(op) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v2.is_constant() and v2.box.getint() == 0: self.make_equal_to(op.result, v1) else: self.emit_operation(op) # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) + self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 0 the result is the other side. if v1.is_constant() and v1.box.getint() == 0: @@ -109,12 +109,12 @@ self.emit_operation(op) # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) + self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 1 the result is the other side. 
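The intbounds.py hunks further up all lean on the same small interval algebra: each value carries an intbound, a comparison is folded away when the two intervals cannot overlap, and propagate_bounds_INT_ADD narrows one argument by intersecting it with the result bound minus the other argument. A rough standalone sketch of that logic follows; the class and methods here are simplified stand-ins for PyPy's real IntBound implementation and only model what these hunks rely on.

    # Simplified stand-in for the IntBound objects used in intbounds.py;
    # None plays the role of "no known bound".

    class Bound(object):
        def __init__(self, lower=None, upper=None):
            self.lower = lower
            self.upper = upper

        def known_lt(self, other):
            # every value of self is < every value of other
            return (self.upper is not None and other.lower is not None
                    and self.upper < other.lower)

        def add_bound(self, other):
            lo = up = None
            if self.lower is not None and other.lower is not None:
                lo = self.lower + other.lower
            if self.upper is not None and other.upper is not None:
                up = self.upper + other.upper
            return Bound(lo, up)

        def sub_bound(self, other):
            neg_up = -other.lower if other.lower is not None else None
            neg_lo = -other.upper if other.upper is not None else None
            return self.add_bound(Bound(neg_lo, neg_up))

        def intersect(self, other):
            # narrow self; returning True is what makes the optimizer call
            # propagate_bounds_backward() on the corresponding box
            changed = False
            if other.lower is not None and (self.lower is None
                                            or other.lower > self.lower):
                self.lower = other.lower
                changed = True
            if other.upper is not None and (self.upper is None
                                            or other.upper < self.upper):
                self.upper = other.upper
                changed = True
            return changed

    # optimize_INT_LT: the comparison folds to the constant 1
    assert Bound(0, 3).known_lt(Bound(10, 20))

    # propagate_bounds_INT_ADD: with r = a + b, 'a' can be narrowed to r - b
    a, b, r = Bound(0, 100), Bound(10, 20), Bound(0, 50)
    assert a.intersect(r.sub_bound(b)) and a.upper == 40

The real class additionally tracks whether a bound exists at all (has_lower/has_upper), which is what the overflow-guard hunks check before intersecting the result bound.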
if v1.is_constant() and v1.box.getint() == 1: @@ -133,13 +133,13 @@ break else: # all constant arguments: constant-fold away - self.make_constant(op.result, op.args[0]) + self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, op.descr)) def optimize_guard(self, op, constbox, emit_operation=True): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_constant(): box = value.box assert isinstance(box, Const) @@ -151,7 +151,7 @@ value.make_constant(constbox) def optimize_GUARD_ISNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_null(): return elif value.is_nonnull(): @@ -160,7 +160,7 @@ value.make_constant(self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_NONNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_nonnull(): return elif value.is_null(): @@ -169,7 +169,7 @@ value.make_nonnull(len(self.optimizer.newoperations) - 1) def optimize_GUARD_VALUE(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) emit_operation = True if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or @@ -178,7 +178,7 @@ old_guard_op = self.optimizer.newoperations[value.last_guard_index] old_opnum = old_guard_op.opnum old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)] # hack hack hack. Change the guard_opnum on # old_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. @@ -187,7 +187,7 @@ descr.guard_opnum = rop.GUARD_VALUE descr.make_a_counter_per_value(old_guard_op) emit_operation = False - constbox = op.args[1] + constbox = op.getarg(1) assert isinstance(constbox, Const) self.optimize_guard(op, constbox, emit_operation) @@ -198,8 +198,8 @@ self.optimize_guard(op, CONST_0) def optimize_GUARD_CLASS(self, op): - value = self.getvalue(op.args[0]) - expectedclassbox = op.args[1] + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: @@ -217,7 +217,7 @@ # it was a guard_nonnull, which we replace with a # guard_nonnull_class. old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)] # hack hack hack. Change the guard_opnum on # old_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. 
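Stepping back from the individual hunks, the mechanical change running through this whole commit is the same everywhere: call sites stop indexing op.args directly and go through accessor methods, so that ResOperation becomes free to change how it stores its arguments. A reduced sketch of the interface added in resoperation.py further down in this diff (the real class also keeps its descr handling, fail_args and repr logic):

    # Reduced sketch of the new ResOperation accessors; the argument list
    # becomes the private attribute _args and is only reached through
    # getarg()/setarg()/numargs().

    class ResOperation(object):
        def __init__(self, opnum, args, result, descr=None):
            self.opnum = opnum
            self._args = list(args)
            self.result = result
            self.descr = descr

        def getarg(self, i):
            return self._args[i]

        def setarg(self, i, box):
            self._args[i] = box

        def numargs(self):
            return len(self._args)

    # old call-site style                 new call-site style
    #   v1 = getvalue(op.args[0])           v1 = getvalue(op.getarg(0))
    #   for i in range(len(op.args)):       for i in range(op.numargs()):
    #       op.args[i] = box                    op.setarg(i, box)

    op = ResOperation(1, ['box_a', 'box_b'], 'box_r')   # strings as toy boxes
    assert op.numargs() == 2 and op.getarg(1) == 'box_b'
    op.setarg(0, 'box_c')
    assert op.getarg(0) == 'box_c'

One place in pyjitpl.py still pokes at op._args directly (marked with an XXX in the hunk below), so the encapsulation is not yet complete at this point in the branch.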
@@ -239,11 +239,11 @@ self.optimizer.exception_might_have_happened = False def optimize_CALL_LOOPINVARIANT(self, op): - funcvalue = self.getvalue(op.args[0]) + funcvalue = self.getvalue(op.getarg(0)) if not funcvalue.is_constant(): self.emit_operation(op) return - key = make_hashable_int(op.args[0].getint()) + key = make_hashable_int(op.getarg(0).getint()) resvalue = self.optimizer.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) @@ -265,17 +265,17 @@ self.emit_operation(op) def optimize_INT_IS_TRUE(self, op): - if self.getvalue(op.args[0]) in self.optimizer.bool_boxes: - self.make_equal_to(op.result, self.getvalue(op.args[0])) + if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes: + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) return - self._optimize_nullness(op, op.args[0], True) + self._optimize_nullness(op, op.getarg(0), True) def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.args[0], False) + self._optimize_nullness(op, op.getarg(0), False) def _optimize_oois_ooisnot(self, op, expect_isnot): - value0 = self.getvalue(op.args[0]) - value1 = self.getvalue(op.args[1]) + value0 = self.getvalue(op.getarg(0)) + value1 = self.getvalue(op.getarg(1)) if value0.is_virtual(): if value1.is_virtual(): intres = (value0 is value1) ^ expect_isnot @@ -285,9 +285,9 @@ elif value1.is_virtual(): self.make_constant_int(op.result, expect_isnot) elif value1.is_null(): - self._optimize_nullness(op, op.args[0], expect_isnot) + self._optimize_nullness(op, op.getarg(0), expect_isnot) elif value0.is_null(): - self._optimize_nullness(op, op.args[1], expect_isnot) + self._optimize_nullness(op, op.getarg(1), expect_isnot) elif value0 is value1: self.make_constant_int(op.result, not expect_isnot) else: @@ -308,7 +308,7 @@ self._optimize_oois_ooisnot(op, False) def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 16 13:52:39 2010 @@ -258,7 +258,7 @@ def setup(self, virtuals): if not virtuals: return - + inputargs = self.optimizer.loop.inputargs specnodes = self.optimizer.loop.token.specnodes assert len(inputargs) == len(specnodes) @@ -288,15 +288,15 @@ target_loop_token = orgop.descr assert isinstance(target_loop_token, LoopToken) specnodes = target_loop_token.specnodes - assert len(op.args) == len(specnodes) + assert op.numargs() == len(specnodes) for i in range(len(specnodes)): - value = self.getvalue(op.args[i]) + value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) op.args = exitargs[:] self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) # # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -322,17 +322,17 @@ # typically a PyPy PyFrame, and now is the end of its execution, so # forcing it now does not have catastrophic effects. 
vrefinfo = self.optimizer.metainterp_sd.virtualref_info - # op.args[1] should really never point to null here + # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object op1 = ResOperation(rop.SETFIELD_GC, op.args, None, descr = vrefinfo.descr_forced) self.optimize_SETFIELD_GC(op1) # - set 'virtual_token' to TOKEN_NONE - args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] + args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)] op1 = ResOperation(rop.SETFIELD_GC, args, None, descr = vrefinfo.descr_virtual_token) self.optimize_SETFIELD_GC(op1) - # Note that in some cases the virtual in op.args[1] has been forced + # Note that in some cases the virtual in op.getarg(1) has been forced # already. This is fine. In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will # trigger a ResumeGuardForcedDescr.handle_async_forcing() which @@ -340,7 +340,7 @@ # was already forced). def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): # optimizefindnode should ensure that fieldvalue is found assert isinstance(value, AbstractVirtualValue) @@ -357,8 +357,8 @@ optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) if value.is_virtual(): value.setfield(op.descr, fieldvalue) else: @@ -367,17 +367,17 @@ self.emit_operation(op) def optimize_NEW_WITH_VTABLE(self, op): - self.make_virtual(op.args[0], op.result, op) + self.make_virtual(op.getarg(0), op.result, op) def optimize_NEW(self, op): self.make_vstruct(op.descr, op.result, op) def optimize_NEW_ARRAY(self, op): - sizebox = self.get_constant_box(op.args[0]) + sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): + if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, descr=op.descr) self.make_varray(op.descr, sizebox.getint(), op.result, op) @@ -386,7 +386,7 @@ self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): self.make_constant_int(op.result, value.getlength()) else: @@ -395,9 +395,9 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: itemvalue = value.getitem(indexbox.getint()) self.make_equal_to(op.result, itemvalue) @@ -411,22 +411,22 @@ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC def optimize_SETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) return value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - 
dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) + source_value = self.getvalue(op.getarg(2)) + dest_value = self.getvalue(op.getarg(3)) + source_start_box = self.get_constant_box(op.getarg(4)) + dest_start_box = self.get_constant_box(op.getarg(5)) + length = self.get_constant_box(op.getarg(6)) if (source_value.is_virtual() and source_start_box and dest_start_box and length and dest_value.is_virtual()): # XXX optimize the case where dest value is not virtual, @@ -439,9 +439,10 @@ return if length and length.getint() == 0: return # 0-length arraycopy - descr = op.args[0] + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + args = [op.getarg(i) for i in range(1, op.numargs())] + self.emit_operation(ResOperation(rop.CALL, args, op.result, descr)) def propagate_forward(self, op): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Thu Sep 16 13:52:39 2010 @@ -159,7 +159,7 @@ if got_type == history.INT: self.registers_i[target_index] = resultbox elif got_type == history.REF: - #debug_print(' ->', + #debug_print(' ->', # llmemory.cast_ptr_to_adr(resultbox.getref_base())) self.registers_r[target_index] = resultbox elif got_type == history.FLOAT: @@ -446,7 +446,7 @@ def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) - self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, + self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, sbox, sizebox) abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, sizebox) @@ -1004,7 +1004,7 @@ resumedescr = compile.ResumeGuardDescr(metainterp_sd, original_greenkey) guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) + descr=resumedescr) virtualizable_boxes = None if metainterp.jitdriver_sd.virtualizable_info is not None: virtualizable_boxes = metainterp.virtualizable_boxes @@ -1463,7 +1463,7 @@ resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes) return resbox - def _record_helper_pure(self, opnum, resbox, descr, *argboxes): + def _record_helper_pure(self, opnum, resbox, descr, *argboxes): canfold = self._all_constants(*argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1472,7 +1472,7 @@ resbox = resbox.nonconstbox() # ensure it is a Box return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) - def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): + def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1485,7 +1485,7 @@ assert resbox is None or isinstance(resbox, Box) # record the operation profiler = self.staticdata.profiler - profiler.count_ops(opnum, RECORDED_OPS) + profiler.count_ops(opnum, RECORDED_OPS) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox @@ -1667,7 +1667,7 @@ # Search in current_merge_points for original_boxes with compatible # green keys, 
representing the beginning of the same loop as the one - # we end now. + # we end now. num_green_args = self.jitdriver_sd.num_green_args for j in range(len(self.current_merge_points)-1, -1, -1): @@ -2090,8 +2090,8 @@ op = self.history.operations[-1] assert op.opnum == rop.CALL resbox_as_const = resbox.constbox() - for arg in op.args: - if not isinstance(arg, Const): + for i in op.numarg(): + if not isinstance(op.getarg(i), Const): break else: # all-constants: remove the CALL operation now and propagate a @@ -2101,7 +2101,8 @@ # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. op.opnum = rop.CALL_PURE - op.args = [resbox_as_const] + op.args + # XXX XXX replace... + op._args = [resbox_as_const] + op._args return resbox def direct_assembler_call(self, targetjitdriver_sd): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Thu Sep 16 13:52:39 2010 @@ -15,12 +15,21 @@ make_sure_not_resized(args) assert isinstance(opnum, int) self.opnum = opnum - self.args = list(args) - make_sure_not_resized(self.args) + self._args = list(args) + make_sure_not_resized(self._args) assert not isinstance(result, list) self.result = result self.setdescr(descr) + def getarg(self, i): + return self._args[i] + + def setarg(self, i, box): + self._args[i] = box + + def numargs(self): + return len(self._args) + def setdescr(self, descr): # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt # instance provided by the backend holding details about the type @@ -35,10 +44,10 @@ descr = self.descr if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.opnum, self.args, self.result, descr) + op = ResOperation(self.opnum, self._args, self.result, descr) op.fail_args = self.fail_args + op.name = self.name if not we_are_translated(): - op.name = self.name op.pc = self.pc return op @@ -57,10 +66,10 @@ prefix = "" if self.descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args])) + ', '.join([str(a) for a in self._args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args]), self.descr) + ', '.join([str(a) for a in self._args]), self.descr) def getopname(self): try: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 16 13:52:39 2010 @@ -33,7 +33,7 @@ self.profiler = EmptyProfiler() self.options = Fake() self.globaldata = Fake() - + def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -75,7 +75,7 @@ assert lst3 == [LLtypeMixin.valuedescr] lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 @@ -141,8 +141,10 @@ txt1 = txt1[39:] txt2 = txt2[39:] assert op1.opnum == op2.opnum - assert 
len(op1.args) == len(op2.args) - for x, y in zip(op1.args, op2.args): + assert op1.numargs() == op2.numargs() + for i in range(op1.numargs()): + x = op1.getarg(i) + y = op2.getarg(i) assert x == remap.get(y, y) if op2.result in remap: assert op1.result == remap[op2.result] @@ -489,7 +491,7 @@ jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected) - + def test_ooisnull_oononnull_1(self): ops = """ [p0] @@ -842,7 +844,7 @@ jump(f, f1) """ self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected, checkspecnodes=False) + expected, checkspecnodes=False) def test_virtual_2(self): ops = """ @@ -2171,7 +2173,7 @@ jump(i1, i0) """ self.optimize_loop(ops, 'Not, Not', expected) - + def test_fold_partially_constant_ops(self): ops = """ [i0] @@ -2183,7 +2185,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(i0, 0) @@ -2194,7 +2196,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(0, i0) @@ -2205,7 +2207,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + # ---------- def make_fail_descr(self): @@ -3119,7 +3121,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noguard(self): ops = """ [i0] @@ -3134,7 +3136,7 @@ jump(i2) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noopt(self): ops = """ [i0] @@ -3153,7 +3155,7 @@ jump(4) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_rev(self): ops = """ [i0] @@ -3170,7 +3172,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_tripple(self): ops = """ [i0] @@ -3189,7 +3191,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add(self): ops = """ [i0] @@ -3204,11 +3206,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_before(self): ops = """ [i0] @@ -3227,7 +3229,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf(self): ops = """ [i0] @@ -3243,11 +3245,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf_before(self): ops = """ [i0] @@ -3268,7 +3270,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub(self): ops = """ [i0] @@ -3283,11 +3285,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_sub(i0, 10) + i2 = int_sub(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub_before(self): ops = """ [i0] @@ -3306,7 +3308,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_ltle(self): ops = """ [i0] @@ -3357,7 +3359,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gtge(self): ops = """ [i0] @@ -3374,7 +3376,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gegt(self): ops = """ [i0] @@ -3558,7 +3560,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ expected = """ @@ -3571,7 +3573,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ self.optimize_loop(ops, 'Not', expected) @@ -3818,7 +3820,7 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): @@ -3835,7 +3837,7 @@ ## jump(1) ## """ ## 
self.optimize_loop(ops, 'Not', expected) - + ## def test_instanceof_guard_class(self): ## ops = """ ## [i0, p0] From cfbolz at codespeak.net Thu Sep 16 14:06:06 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 16 Sep 2010 14:06:06 +0200 (CEST) Subject: [pypy-svn] r77108 - in pypy/branch/better-map-instances/pypy/objspace/std: . test Message-ID: <20100916120606.7AAE3282C23@codespeak.net> Author: cfbolz Date: Thu Sep 16 14:06:05 2010 New Revision: 77108 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: make compact classes only for subclasses of object Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 16 14:06:05 2010 @@ -4,6 +4,7 @@ from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.dictmultiobject import IteratorImplementation from pypy.objspace.std.dictmultiobject import _is_sane_hash +from pypy.objspace.std.objectobject import W_ObjectObject # ____________________________________________________________ # attribute shapes @@ -376,6 +377,8 @@ pass # mainly for tests def get_subclass_of_correct_size(space, cls, supercls, w_type): + if not hasattr(supercls, "_init_empty"): + return supercls # not a mapdict class assert space.config.objspace.std.withmapdict map = w_type.terminator classes = memo_get_subclass_of_correct_size(space, supercls) @@ -388,23 +391,29 @@ return classes[-1] get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" +NUM_SUBCLASSES = 10 # XXX tweak this number def memo_get_subclass_of_correct_size(space, supercls): key = space, supercls try: return _subclass_cache[key] except KeyError: - result = [] - for i in range(1, 11): # XXX tweak this number - result.append(_make_subclass_size_n(supercls, i)) + if (not issubclass(supercls, W_ObjectObject) or + hasattr(supercls, '__del__')): + class subcls(ObjectMixin, supercls): + pass + subcls.__name__ = supercls.__name__ + "Concrete" + result = [subcls] * NUM_SUBCLASSES + else: + result = [] + for i in range(1, NUM_SUBCLASSES+1): + result.append(_make_subclass_size_n(supercls, i)) _subclass_cache[key] = result return result memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" _subclass_cache = {} def _make_subclass_size_n(supercls, n): - if not hasattr(supercls, "_init_empty"): - return supercls from pypy.rlib import unroll, rerased rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Thu Sep 16 14:06:05 2010 @@ -358,8 +358,9 @@ def test_specialized_class(): + from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib import rerased - class Object(BaseMapdictObject): + class Object(BaseMapdictObject, W_ObjectObject): # slightly evil class typedef: hasdict = False classes = memo_get_subclass_of_correct_size(space, Object) From arigo at codespeak.net Thu Sep 16 14:14:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 14:14:09 
+0200 (CEST) Subject: [pypy-svn] r77109 - in pypy/branch/gen2-gc/pypy/rpython/memory: . gc gc/test test Message-ID: <20100916121409.BB3A3282C23@codespeak.net> Author: arigo Date: Thu Sep 16 14:14:08 2010 New Revision: 77109 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/support.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_support.py Log: A mix of features: * a test and fix for varsized mallocs * count more systematically the size of objects * for this GC, there is a simpler way to do id() * ...and identityhash(): don't needed to make objects grow a Signed * implement finalizers, copying the algo from SemiSpaceGC Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 14:14:08 2010 @@ -1,10 +1,12 @@ +import sys from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rpython.memory.gc.base import MovingGCBase +from pypy.rpython.memory.gc.base import GCBase, MovingGCBase from pypy.rpython.memory.gc import minimarkpage from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE -from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint from pypy.rlib.debug import ll_assert, debug_print +from pypy.rlib.objectmodel import we_are_translated WORD = LONG_BIT // 8 @@ -26,10 +28,16 @@ # The following flag is set on surviving objects during a major collection. GCFLAG_VISITED = first_gcflag << 2 -# The following flag is set on objects that have an extra hash field, -# except on nursery objects, where it means that it *will* grow a hash -# field when moving. -GCFLAG_HASHFIELD = first_gcflag << 3 +# The following flag is set on nursery objects of which we asked the id +# or the identityhash. It means that a space of the size of the object +# has already been allocated in the nonmovable part. The same flag is +# abused to mark prebuilt objects whose hash has been taken during +# translation and is statically recorded. +GCFLAG_HAS_SHADOW = first_gcflag << 3 + +# The following flag is set temporarily on some objects during a major +# collection. See pypy/doc/discussion/finalizer-order.txt +GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4 # Marker set to 'tid' during a minor collection when an object from # the nursery was forwarded. @@ -43,7 +51,7 @@ inline_simple_malloc_varsize = True needs_write_barrier = True prebuilt_gc_objects_are_static_roots = False - malloc_zero_filled = True # XXX experiment with False + malloc_zero_filled = True # xxx experiment with False # All objects start with a HDR, i.e. with a field 'tid' which contains # a word. This word is divided in two halves: the lower half contains @@ -51,7 +59,10 @@ # by GCFLAG_xxx above. 
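The new flags are plain bits in the upper half of the tid word: the comment just above says the lower half holds the typeid and the upper half the GCFLAG_xxx bits, and the diff defines GCFLAG_VISITED, GCFLAG_HAS_SHADOW and GCFLAG_FINALIZATION_ORDERING as first_gcflag shifted by 2, 3 and 4. A toy illustration of that packing; the concrete value of first_gcflag below is only an assumption for the example, the real one comes from the surrounding module.

    # Toy model of the tid layout: typeid in the lower half of the word,
    # GCFLAG_xxx bits in the upper half.  LONG_BIT/first_gcflag are given
    # example values here; the real module computes them itself.

    LONG_BIT = 64
    first_gcflag = 1 << (LONG_BIT // 2)      # assumed: first bit of the upper half

    GCFLAG_VISITED               = first_gcflag << 2
    GCFLAG_HAS_SHADOW            = first_gcflag << 3
    GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4

    def make_tid(typeid, flags=0):
        return typeid | flags

    def get_typeid(tid):
        return tid & (first_gcflag - 1)      # mask away all flag bits

    tid = make_tid(42, GCFLAG_VISITED | GCFLAG_HAS_SHADOW)
    assert get_typeid(tid) == 42
    assert tid & GCFLAG_HAS_SHADOW
    assert not (tid & GCFLAG_FINALIZATION_ORDERING)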
HDR = lltype.Struct('header', ('tid', lltype.Signed)) typeid_is_in_field = 'tid' - #withhash_flag_is_in_field = 'tid', _GCFLAG_HASH_BASE * 0x2 + #withhash_flag_is_in_field = 'tid', GCFLAG_HAS_SHADOW + # ^^^ prebuilt objects may have the flag GCFLAG_HAS_SHADOW; + # then they are one word longer, the extra word storing the hash. + # During a minor collection, the objects in the nursery that are # moved outside are changed in-place: their header is replaced with @@ -59,7 +70,7 @@ # where the object was moved. This means that all objects in the # nursery need to be at least 2 words long, but objects outside the # nursery don't need to. - minimal_size_in_nursery = llmemory.raw_malloc_usage( + minimal_size_in_nursery = ( llmemory.sizeof(HDR) + llmemory.sizeof(llmemory.Address)) @@ -78,19 +89,28 @@ "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger or equal are just allocated with raw_malloc(). + # that are larger are just allocated with raw_malloc(). "small_request_threshold": 32*WORD, + + # Full collection threshold: after a major collection, we record + # the total size consumed; and after every minor collection, if the + # total size is now more than 'major_collection_threshold' times, + # we trigger the next major collection. + "major_collection_threshold": 1.75, } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, nursery_size=32*WORD, page_size=16*WORD, arena_size=64*WORD, - small_request_threshold=6*WORD, + small_request_threshold=5*WORD, + major_collection_threshold=2.5, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) + assert small_request_threshold % WORD == 0 self.nursery_size = nursery_size self.small_request_threshold = small_request_threshold + self.major_collection_threshold = major_collection_threshold self.nursery_hash_base = -1 # # The ArenaCollection() handles the nonmovable objects allocation. @@ -107,25 +127,34 @@ # A list of all prebuilt GC objects that contain pointers to the heap self.prebuilt_root_objects = self.AddressStack() # + # Support for id and identityhash: map nursery objects with + # GCFLAG_HAS_SHADOW to their future location at the next + # minor collection. + self.young_objects_shadows = self.AddressDict() + # self._init_writebarrier_logic() def setup(self): """Called at run-time to initialize the GC.""" - MovingGCBase.setup(self) + # + # Hack: MovingGCBase.setup() sets up stuff related to id(), which + # we implement differently anyway. So directly call GCBase.setup(). + GCBase.setup(self) # assert self.nursery_size > 0, "XXX" # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() + self.rawmalloced_total_size = r_uint(0) # - # Support for id() - self.young_objects_with_id = self.AddressDict() + # A list of all objects with finalizers. + self.objects_with_finalizers = self.AddressDeque() # # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). 
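The malloc_fixedsize_clear() hunk that follows turns the allocator into a three-way dispatch: objects with finalizers go to a non-movable allocation, objects larger than small_request_threshold go to an external raw_malloc, and everything else is bump-allocated in the nursery, with collect_and_reserve() stepping in when the nursery is full. A condensed pure-Python sketch of that control flow; sizes and helpers here are illustrative, not the real llmemory/llarena code.

    # Condensed sketch of the allocation policy; addresses are integers
    # and the three slow paths are stubs, just to show the dispatch.

    class ToyMiniMark(object):
        def __init__(self, nursery_size=64, small_request_threshold=16):
            self.nursery_size = nursery_size
            self.small_request_threshold = small_request_threshold
            self.nursery_next = 0
            self.nursery_top = nursery_size

        def malloc_fixedsize(self, size, needs_finalizer=False):
            if needs_finalizer:
                return self.malloc_with_finalizer(size)      # non-movable
            if size > self.small_request_threshold:
                return self.external_malloc(size)             # raw_malloc'ed
            result = self.nursery_next                        # bump pointer
            self.nursery_next = result + size
            if self.nursery_next > self.nursery_top:
                result = self.collect_and_reserve(size)      # nursery full
            return ('nursery', result)

        def collect_and_reserve(self, size):
            self.minor_collection()          # empty the nursery (the real
                                             # code may run a major one too)
            self.nursery_next = size
            return 0

        def minor_collection(self):
            self.nursery_next = 0            # stand-in for the real thing

        def malloc_with_finalizer(self, size):
            return ('nonmovable', size)

        def external_malloc(self, size):
            return ('raw', size)

    gc = ToyMiniMark()
    assert gc.malloc_fixedsize(16)[0] == 'nursery'
    assert gc.malloc_fixedsize(100)[0] == 'raw'
    assert gc.malloc_fixedsize(16, needs_finalizer=True)[0] == 'nonmovable'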
- extra = self.small_request_threshold - WORD + extra = self.small_request_threshold self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) if not self.nursery: raise MemoryError("cannot allocate nursery") @@ -133,27 +162,38 @@ self.nursery_next = self.nursery # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size + # initialize the threshold, a bit arbitrarily + self.next_major_collection_threshold = ( + self.nursery_size * self.major_collection_threshold) def malloc_fixedsize_clear(self, typeid, size, can_collect=True, needs_finalizer=False, contains_weakptr=False): ll_assert(can_collect, "!can_collect") - assert not needs_finalizer # XXX assert not contains_weakptr # XXX size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size + rawtotalsize = llmemory.raw_malloc_usage(totalsize) + # + # If the object needs a finalizer, ask for a rawmalloc. + # The following check should be constant-folded. + if needs_finalizer: + ll_assert(not contains_weakptr, + "needs_finalizer and contains_weakptr both specified") + result = self.malloc_with_finalizer(typeid, totalsize) # - # If totalsize is greater or equal than small_request_threshold, - # ask for a rawmalloc. The following check should be constant-folded. - if llmemory.raw_malloc_usage(totalsize)>=self.small_request_threshold: - result = self.external_malloc(typeid, totalsize) + # If totalsize is greater than small_request_threshold, ask for + # a rawmalloc. The following check should be constant-folded. + elif rawtotalsize > self.small_request_threshold: + result = self._external_malloc(typeid, totalsize) # else: # If totalsize is smaller than minimal_size_in_nursery, round it # up. The following check should also be constant-folded. - if (llmemory.raw_malloc_usage(totalsize) < - llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): - totalsize = self.minimal_size_in_nursery + if (rawtotalsize < + llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): + totalsize = llmemory.raw_malloc_usage( + self.minimal_size_in_nursery) # # Get the memory from the nursery. If there is not enough space # there, do a collect first. @@ -182,15 +222,24 @@ # # If totalsize is greater than small_request_threshold, ask for # a rawmalloc. - if llmemory.raw_malloc_usage(totalsize)>=self.small_request_threshold: - result = self.external_malloc(typeid, totalsize) + if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + result = self._external_malloc(typeid, totalsize) # else: + # Round the size up to the next multiple of WORD. Note that + # this is done only if totalsize <= self.small_request_threshold, + # i.e. it cannot overflow, and it keeps the property that + # totalsize <= self.small_request_threshold. + totalsize = llarena.round_up_for_allocation(totalsize) + ll_assert(llmemory.raw_malloc_usage(totalsize) <= + self.small_request_threshold, + "round_up_for_allocation() rounded up too much?") + # # 'totalsize' should contain at least the GC header and # the length word, so it should never be smaller than - # 'minimal_size_in_nursery' so far + # 'minimal_size_in_nursery' ll_assert(llmemory.raw_malloc_usage(totalsize) >= - self.minimal_size_in_nursery, + llmemory.raw_malloc_usage(self.minimal_size_in_nursery), "malloc_varsize_clear(): totalsize < minimalsize") # # Get the memory from the nursery. If there is not enough space @@ -221,25 +270,88 @@ and finally reserve 'totalsize' bytes at the start of the now-empty nursery. 
""" - self.collect(0) # XXX - self.nursery_next = self.nursery + totalsize - return self.nursery + self.minor_collection() + # + if self.get_total_memory_used() > self.next_major_collection_threshold: + self.major_collection() + # + # The nursery might not be empty now, because of + # execute_finalizers(). If it is almost full again, + # we need to fix it with another call to minor_collection(). + if self.nursery_next + totalsize > self.nursery_top: + self.minor_collection() + # + else: + debug_print('minor collection') + # + result = self.nursery_next + self.nursery_next = result + totalsize + ll_assert(self.nursery_next <= self.nursery_top, "nursery overflow") + return result collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, totalsize): - """Allocate a large object using raw_malloc().""" + def _full_collect_if_needed(self): + if self.get_total_memory_used() > self.next_major_collection_threshold: + self.collect() + + def _reserve_external_memory(self, totalsize): + """Do a raw_malloc() to get some external memory. + Note that the returned memory is not cleared.""" # result = llmemory.raw_malloc(totalsize) if not result: raise MemoryError("cannot allocate large object") + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(result + size_gc_header) + return result + + def _external_malloc(self, typeid, totalsize): + """Allocate a large object using raw_malloc().""" + # + # If somebody calls _external_malloc() a lot, we must eventually + # force a full collection. + self._full_collect_if_needed() + # + result = self._reserve_external_memory(totalsize) llmemory.raw_memclear(result, totalsize) self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) + return result + _external_malloc._dont_inline_ = True + + + def _malloc_nonmovable(self, typeid, totalsize): + """Allocate an object non-movable.""" + # + # If somebody calls _malloc_nonmovable() a lot, we must eventually + # force a full collection. + self._full_collect_if_needed() + # + rawtotalsize = llmemory.raw_malloc_usage(totalsize) + if rawtotalsize <= self.small_request_threshold: + # + # Ask the ArenaCollection to do the malloc. + result = self.ac.malloc(totalsize) + # + else: + # The size asked for is too large for the ArenaCollection. + result = self._reserve_external_memory(totalsize) # + llmemory.raw_memclear(result, totalsize) + self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) + return result + + + def malloc_with_finalizer(self, typeid, totalsize): + """Allocate an object with a finalizer.""" + # + result = self._malloc_nonmovable(typeid, totalsize) size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_objects.append(result + size_gc_header) + self.objects_with_finalizers.append(result + size_gc_header) return result - external_malloc._dont_inline_ = True + malloc_with_finalizer._dont_inline_ = True # ---------- @@ -266,9 +378,6 @@ flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_NO_YOUNG_PTRS self.init_gc_object(addr, typeid16, flags) - def _can_never_move(self, obj): - return False # approximate good enough answer for id() - def is_in_nursery(self, addr): ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, "odd-valued (i.e. 
tagged) pointer unexpected here") @@ -281,6 +390,16 @@ obj = llarena.getfakearenaaddress(obj) return obj.address[0] + def can_move(self, addr): + """Overrides the parent can_move().""" + return self.is_in_nursery(addr) + + def get_total_memory_used(self): + """Return the total memory used, not counting any object in the + nursery: only objects in the ArenaCollection or raw-malloced. + """ + return self.ac.total_memory_used + self.rawmalloced_total_size + def debug_check_object(self, obj): # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), @@ -291,6 +410,9 @@ # the GCFLAG_VISITED should not be set between collections ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") + # the GCFLAG_FINALIZATION_ORDERING should not be set between coll. + ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0, + "unexpected GCFLAG_FINALIZATION_ORDERING") # ---------- # Write barrier @@ -350,6 +472,43 @@ self.remember_young_pointer = remember_young_pointer + def assume_young_pointers(self, addr_struct): + """Called occasionally by the JIT to mean 'assume that 'addr_struct' + may now contain young pointers. + """ + XXX + objhdr = self.header(addr_struct) + if objhdr.tid & GCFLAG_NO_YOUNG_PTRS: + self.old_objects_pointing_to_young.append(addr_struct) + objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.last_generation_root_objects.append(addr_struct) + + def writebarrier_before_copy(self, source_addr, dest_addr): + """ This has the same effect as calling writebarrier over + each element in dest copied from source, except it might reset + one of the following flags a bit too eagerly, which means we'll have + a bit more objects to track, but being on the safe side. + """ + source_hdr = self.header(source_addr) + dest_hdr = self.header(dest_addr) + if dest_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0: + return True + # ^^^ a fast path of write-barrier + # + if source_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0: + # there might be an object in source that is in nursery + self.old_objects_pointing_to_young.append(dest_addr) + dest_hdr.tid &= ~GCFLAG_NO_YOUNG_PTRS + # + if dest_hdr.tid & GCFLAG_NO_HEAP_PTRS: + if source_hdr.tid & GCFLAG_NO_HEAP_PTRS == 0: + dest_hdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(dest_addr) + return True + + # ---------- # Nursery collection @@ -371,10 +530,9 @@ # We proceed until 'old_objects_pointing_to_young' is empty. self.collect_oldrefs_to_nursery() # - # Update the id tracking of any object that was moved out of - # the nursery. - if self.young_objects_with_id.length() > 0: - self.update_young_objects_with_id() + # Clear this mapping. + if self.young_objects_shadows.length() > 0: + self.young_objects_shadows.clear() # # Now all live nursery objects should be out, and the rest dies. # Fill the whole nursery with zero and reset the current nursery @@ -382,8 +540,8 @@ llarena.arena_reset(self.nursery, self.nursery_size, 2) self.nursery_next = self.nursery # - self.change_nursery_hash_base() - self.debug_check_consistency() # XXX expensive! + if not we_are_translated(): + self.debug_check_consistency() # xxx expensive! def collect_roots_in_nursery(self): @@ -425,8 +583,6 @@ # If 'obj' is not in the nursery, nothing to change. 
if not self.is_in_nursery(obj): return - #size_gc_header = self.gcheaderbuilder.size_gc_header - #print '\ttrace_drag_out', llarena.getfakearenaaddress(obj - size_gc_header), # # If 'obj' was already forwarded, change it to its forwarding address. if self.is_forwarded_marker(self.header(obj).tid): @@ -438,25 +594,29 @@ size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) totalsize = size_gc_header + size - totalsize_incl_hash = totalsize - if self.header(obj).tid & GCFLAG_HASHFIELD: - totalsize_incl_hash += llmemory.sizeof(lltype.Signed) - # - # Allocate a new nonmovable location for it. - # Note that 'totalsize' must be < small_request_threshold, so - # 'totalsize_incl_hash <= small_request_threshold'. - newhdr = self.ac.malloc(totalsize_incl_hash) - newobj = newhdr + size_gc_header + # + if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: + # + # Common case: allocate a new nonmovable location for it. + newhdr = self.ac.malloc(totalsize) + # + else: + # The object has already a shadow. + newobj = self.young_objects_shadows.get(obj) + ll_assert(newobj, "GCFLAG_HAS_SHADOW but not shadow found") + #print 'moving object %r into shadow %r' % ( + # llarena.getfakearenaaddress(obj), + # llarena.getfakearenaaddress(newobj),) + newhdr = newobj - size_gc_header + # + # Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get + # copied to the shadow itself. + self.header(obj).tid &= ~GCFLAG_HAS_SHADOW # # Copy it. Note that references to other objects in the # nursery are kept unchanged in this step. llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize) # - # Write the hash field too, if necessary. - if self.header(obj).tid & GCFLAG_HASHFIELD: - hash = self._compute_current_nursery_hash(obj) - (newhdr + (size_gc_header + size)).signed[0] = hash - # # Set the old object's tid to FORWARDED_MARKER and replace # the old object's content with the target address. # A bit of no-ops to convince llarena that we are changing @@ -466,6 +626,7 @@ llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address)) self.header(obj).tid = FORWARDED_MARKER obj = llarena.getfakearenaaddress(obj) + newobj = newhdr + size_gc_header obj.address[0] = newobj # # Change the original pointer to this object. @@ -486,6 +647,8 @@ def major_collection(self): """Do a major collection. Only for when the nursery is empty.""" # + debug_print('major collection:', self.get_total_memory_used()) + # # Debugging checks ll_assert(self.nursery_next == self.nursery, "nursery not empty in major_collection()") @@ -495,12 +658,18 @@ # find and free some of the objects allocated by the ArenaCollection. # We first visit all objects and toggle the flag GCFLAG_VISITED on # them, starting from the roots. + self.objects_to_trace = self.AddressStack() self.collect_roots() self.visit_all_objects() # - # Walk the 'objects_with_id' list and remove the ones that die, - # i.e. that don't have the GCFLAG_VISITED flag. - self.update_objects_with_id() + # Finalizer support: adds the flag GCFLAG_VISITED to all objects + # with a finalizer and all objects reachable from there (and also + # moves some objects from 'objects_with_finalizers' to + # 'run_finalizers'). + if self.objects_with_finalizers.non_empty(): + self.deal_with_objects_with_finalizers() + # + self.objects_to_trace.delete() # # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. 
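Both sweeps that follow (the rawmalloced_objects list and the ArenaCollection mass_free) use the same contract: the major collection first marks every reachable object with GCFLAG_VISITED by draining the objects_to_trace stack, and the sweep then frees exactly the unmarked objects while clearing the flag on the survivors. A minimal model of that shape, with dicts standing in for headers and addresses:

    # Minimal mark-then-sweep model; the real code works on raw addresses
    # and object headers instead of Python dicts.

    VISITED = 0x1

    def major_collection(roots, all_objects):
        objects_to_trace = list(roots)
        # mark phase
        while objects_to_trace:
            obj = objects_to_trace.pop()
            if obj['tid'] & VISITED:
                continue
            obj['tid'] |= VISITED
            objects_to_trace.extend(obj['refs'])
        # sweep phase: keep the marked objects, resetting the flag
        survivors = []
        for obj in all_objects:
            if obj['tid'] & VISITED:
                obj['tid'] &= ~VISITED
                survivors.append(obj)
        return survivors

    a = {'tid': 0, 'refs': []}
    b = {'tid': 0, 'refs': [a]}
    dead = {'tid': 0, 'refs': []}
    assert major_collection([b], [a, b, dead]) == [a, b]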
@@ -515,6 +684,15 @@ self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) # self.debug_check_consistency() + # + debug_print(' ->', self.get_total_memory_used()) + self.next_major_collection_threshold = ( + self.get_total_memory_used() * self.major_collection_threshold) + # + # At the end, we can execute the finalizers of the objects + # listed in 'run_finalizers'. Note that this will typically do + # more allocations. + self.execute_finalizers() def _free_if_unvisited(self, hdr): @@ -540,6 +718,9 @@ self.header(obj).tid &= ~GCFLAG_VISITED # survives self.rawmalloced_objects.append(obj) else: + totalsize = size_gc_header + self.get_size(obj) + rawtotalsize = llmemory.raw_malloc_usage(totalsize) + self.rawmalloced_total_size -= rawtotalsize llmemory.raw_free(obj - size_gc_header) # list.delete() @@ -548,17 +729,23 @@ def collect_roots(self): # Collect all roots. Starts from all the objects # from 'prebuilt_root_objects'. - self.objects_to_trace = self.AddressStack() - self.prebuilt_root_objects.foreach(self._collect_obj, None) + self.prebuilt_root_objects.foreach(self._collect_obj, + self.objects_to_trace) # # Add the roots from the other sources. self.root_walker.walk_roots( MiniMarkGC._collect_ref, # stack roots MiniMarkGC._collect_ref, # static in prebuilt non-gc structures None) # we don't need the static in all prebuilt gc objects - - def _collect_obj(self, obj, ignored=None): - self.objects_to_trace.append(obj) + # + # If we are in an inner collection caused by a call to a finalizer, + # the 'run_finalizers' objects also need to kept alive. + self.run_finalizers.foreach(self._collect_obj, + self.objects_to_trace) + + @staticmethod + def _collect_obj(obj, objects_to_trace): + objects_to_trace.append(obj) def _collect_ref(self, root, ignored=None): self.objects_to_trace.append(root.address[0]) @@ -568,7 +755,6 @@ while pending.non_empty(): obj = pending.pop() self.visit(obj) - pending.delete() def visit(self, obj): # @@ -594,98 +780,155 @@ # ---------- - # id() support + # id() and identityhash() support - def id(self, gcobj): - """Implement id() of an object, given as a GCREF.""" + def id_or_identityhash(self, gcobj, special_case_prebuilt): + """Implement the common logic of id() and identityhash() + of an object, given as a GCREF. + """ obj = llmemory.cast_ptr_to_adr(gcobj) # - # Is it a tagged pointer? For them, the result is odd-valued. - if not self.is_valid_gc_object(obj): - return llmemory.cast_adr_to_int(obj) - # - # Is the object still in the nursery? - if self.is_in_nursery(obj): - result = self.young_objects_with_id.get(obj) - if not result: - result = self._next_id() - self.young_objects_with_id.setitem(obj, result) - else: - result = self.objects_with_id.get(obj) - if not result: - # An 'obj' not in the nursery and not in 'objects_with_id' - # did not have its id() asked for and will not move any more, - # so we can just return its address as the result. - return llmemory.cast_adr_to_int(obj) - # - # If we reach here, 'result' is an odd number. If we double it, - # we have a number of the form 4n+2, which cannot collide with - # tagged pointers nor with any real address. - return llmemory.cast_adr_to_int(result) * 2 - - - def update_young_objects_with_id(self): - # Called during a minor collection. - self.young_objects_with_id.foreach(self._update_object_id, - self.objects_with_id) - self.young_objects_with_id.clear() - # NB. 
the clear() also makes the dictionary shrink back to its - # minimal size, which is actually a good idea: a large, mostly-empty - # table is bad for the next call to 'foreach'. + if self.is_valid_gc_object(obj): + if self.is_in_nursery(obj): + # + # The object not a tagged pointer, and is it still in the + # nursery. Find or allocate a "shadow" object, which is + # where the object will be moved by the next minor + # collection + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + shadow = self.young_objects_shadows.get(obj) + ll_assert(shadow, "GCFLAG_HAS_SHADOW but not shadow found") + else: + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + shadowhdr = self.ac.malloc(size_gc_header + size) + # initialize to an invalid tid *without* GCFLAG_VISITED, + # so that if the object dies before the next minor + # collection, the shadow will stay around but be collected + # by the next major collection. + shadow = shadowhdr + size_gc_header + self.header(shadow).tid = 0 + self.header(obj).tid |= GCFLAG_HAS_SHADOW + self.young_objects_shadows.setitem(obj, shadow) + # + # The answer is the address of the shadow. + obj = shadow + # + elif special_case_prebuilt: + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + # + # For identityhash(), we need a special case for some + # prebuilt objects: their hash must be the same before + # and after translation. It is stored as an extra word + # after the object. But we cannot use it for id() + # because the stored value might clash with a real one. + size = self.get_size(obj) + return (obj + size).signed[0] + # + return llmemory.cast_adr_to_int(obj) - def _update_object_id(self, obj, id, new_objects_with_id): - if self.is_forwarded_marker(self.header(obj).tid): - newobj = self.get_forwarding_address(obj) - new_objects_with_id.setitem(newobj, id) - else: - self.id_free_list.append(id) - def _update_object_id_FAST(self, obj, id, new_objects_with_id): - # overrides the parent's version (a bit hackish) - if self.header(obj).tid & GCFLAG_VISITED: - new_objects_with_id.insertclean(obj, id) - else: - self.id_free_list.append(id) + def id(self, gcobj): + return self.id_or_identityhash(gcobj, False) + + def identityhash(self, gcobj): + return self.id_or_identityhash(gcobj, True) # ---------- - # identityhash() support + # Finalizers - def identityhash(self, gcobj): - obj = llmemory.cast_ptr_to_adr(gcobj) - if self.is_in_nursery(obj): - # - # A nursery object's identityhash is never stored with the - # object, but returned by _compute_current_nursery_hash(). - # But we must set the GCFLAG_HASHFIELD to remember that - # we will have to store it into the object when it moves. - self.header(obj).tid |= GCFLAG_HASHFIELD - return self._compute_current_nursery_hash(obj) - # - if self.header(obj).tid & GCFLAG_HASHFIELD: - # - # An non-moving object with a hash field. - objsize = self.get_size(obj) - obj = llarena.getfakearenaaddress(obj) - return (obj + objsize).signed[0] - # - # No hash field needed. - return llmemory.cast_adr_to_int(obj) + def deal_with_objects_with_finalizers(self): + # Walk over list of objects with finalizers. + # If it is not surviving, add it to the list of to-be-called + # finalizers and make it survive, to make the finalizer runnable. + # We try to run the finalizers in a "reasonable" order, like + # CPython does. The details of this algorithm are in + # pypy/doc/discussion/finalizer-order.txt. 
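Before the finalizer machinery below, the shadow trick from id_or_identityhash() above is worth restating in isolation: asking for the id of a young (hence movable) object allocates its future non-movable location right away, records it in young_objects_shadows together with GCFLAG_HAS_SHADOW, and returns that address; trace_drag_out() then moves the object into precisely that spot at the next minor collection. A toy model, with integers standing in for addresses:

    # Toy model of the shadow scheme; 'addresses' are integers from a
    # counter and objects are tiny Python objects.

    class Obj(object):
        def __init__(self, addr):
            self.addr = addr                 # current (nursery) address

    class ToyIdSupport(object):
        def __init__(self):
            self.shadows = {}                # young obj -> reserved old addr
            self.next_old_addr = 1000

        def id(self, obj, is_young):
            if not is_young:
                return obj.addr              # old objects never move
            if obj not in self.shadows:      # reserve the shadow once
                self.shadows[obj] = self.next_old_addr
                self.next_old_addr += 64
            return self.shadows[obj]

        def move_out_of_nursery(self, obj):
            # trace_drag_out(): reuse the shadow if one was reserved,
            # otherwise take a fresh old-space address
            if obj in self.shadows:
                obj.addr = self.shadows.pop(obj)
            else:
                obj.addr = self.next_old_addr
                self.next_old_addr += 64

    support = ToyIdSupport()
    o = Obj(addr=7)                          # still in the nursery
    i = support.id(o, is_young=True)         # stable id, equals the shadow
    support.move_out_of_nursery(o)           # next minor collection
    assert o.addr == i                       # id() stayed valid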
+ new_with_finalizer = self.AddressDeque() + marked = self.AddressDeque() + pending = self.AddressStack() + self.tmpstack = self.AddressStack() + while self.objects_with_finalizers.non_empty(): + x = self.objects_with_finalizers.popleft() + ll_assert(self._finalization_state(x) != 1, + "bad finalization state 1") + if self.header(x).tid & GCFLAG_VISITED: + new_with_finalizer.append(x) + continue + marked.append(x) + pending.append(x) + while pending.non_empty(): + y = pending.pop() + state = self._finalization_state(y) + if state == 0: + self._bump_finalization_state_from_0_to_1(y) + self.trace(y, self._append_if_nonnull, pending) + elif state == 2: + self._recursively_bump_finalization_state_from_2_to_3(y) + self._recursively_bump_finalization_state_from_1_to_2(x) + + while marked.non_empty(): + x = marked.popleft() + state = self._finalization_state(x) + ll_assert(state >= 2, "unexpected finalization state < 2") + if state == 2: + self.run_finalizers.append(x) + # we must also fix the state from 2 to 3 here, otherwise + # we leave the GCFLAG_FINALIZATION_ORDERING bit behind + # which will confuse the next collection + self._recursively_bump_finalization_state_from_2_to_3(x) + else: + new_with_finalizer.append(x) + self.tmpstack.delete() + pending.delete() + marked.delete() + self.objects_with_finalizers.delete() + self.objects_with_finalizers = new_with_finalizer + + def _append_if_nonnull(pointer, stack): + stack.append(pointer.address[0]) + _append_if_nonnull = staticmethod(_append_if_nonnull) - def change_nursery_hash_base(self): - # The following should be enough to ensure that young objects - # tend to always get a different hash. It also makes sure that - # nursery_hash_base is not a multiple of WORD, to avoid collisions - # with the hash of non-young objects. - hash_base = self.nursery_hash_base - hash_base += self.nursery_size - 1 - if (hash_base & (WORD-1)) == 0: - hash_base -= 1 - self.nursery_hash_base = intmask(hash_base) + def _finalization_state(self, obj): + tid = self.header(obj).tid + if tid & GCFLAG_VISITED: + if tid & GCFLAG_FINALIZATION_ORDERING: + return 2 + else: + return 3 + else: + if tid & GCFLAG_FINALIZATION_ORDERING: + return 1 + else: + return 0 - def _compute_current_nursery_hash(self, obj): - return intmask(llmemory.cast_adr_to_int(obj) + self.nursery_hash_base) + def _bump_finalization_state_from_0_to_1(self, obj): + ll_assert(self._finalization_state(obj) == 0, + "unexpected finalization state != 0") + hdr = self.header(obj) + hdr.tid |= GCFLAG_FINALIZATION_ORDERING + + def _recursively_bump_finalization_state_from_2_to_3(self, obj): + ll_assert(self._finalization_state(obj) == 2, + "unexpected finalization state != 2") + pending = self.tmpstack + ll_assert(not pending.non_empty(), "tmpstack not empty") + pending.append(obj) + while pending.non_empty(): + y = pending.pop() + hdr = self.header(y) + if hdr.tid & GCFLAG_FINALIZATION_ORDERING: # state 2 ? + hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING # change to state 3 + self.trace(y, self._append_if_nonnull, pending) + + def _recursively_bump_finalization_state_from_1_to_2(self, obj): + # recursively convert objects from state 1 to state 2. + # The call to visit_all_objects() will add the GCFLAG_VISITED + # recursively. 
+ self.objects_to_trace.append(obj) + self.visit_all_objects() # ____________________________________________________________ @@ -702,6 +945,7 @@ self.page_size = page_size self.small_request_threshold = small_request_threshold self.all_objects = [] + self.total_memory_used = 0 def malloc(self, size): nsize = llmemory.raw_malloc_usage(size) @@ -710,16 +954,18 @@ ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") # result = llarena.arena_malloc(nsize, False) - # - minimarkpage.reserve_with_hash(result, size) - self.all_objects.append(result) + llarena.arena_reserve(result, size) + self.all_objects.append((result, nsize)) + self.total_memory_used += nsize return result def mass_free(self, ok_to_free_func): objs = self.all_objects self.all_objects = [] - for rawobj in objs: + self.total_memory_used = 0 + for rawobj, nsize in objs: if ok_to_free_func(rawobj): llarena.arena_free(rawobj) else: - self.all_objects.append(rawobj) + self.all_objects.append((rawobj, nsize)) + self.total_memory_used += nsize Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Thu Sep 16 14:14:08 2010 @@ -1,5 +1,5 @@ from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi -from pypy.rlib.rarithmetic import LONG_BIT +from pypy.rlib.rarithmetic import LONG_BIT, r_uint from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import ll_assert @@ -74,11 +74,7 @@ self.uninitialized_pages = PAGE_NULL self.num_uninitialized_pages = 0 self.free_pages = NULL - self.used_pages = 0 # number of pages at least partially filled - - - def pages_in_use(self): - return self.used_pages + self.total_memory_used = r_uint(0) def malloc(self, size): @@ -87,6 +83,7 @@ ll_assert(nsize > 0, "malloc: size is null or negative") ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") + self.total_memory_used += nsize # # Get the page to use from the size size_class = nsize / WORD @@ -123,7 +120,7 @@ page.nextpage = self.full_page_for_size[size_class] self.full_page_for_size[size_class] = page # - reserve_with_hash(result, _dummy_size(size)) + llarena.arena_reserve(result, _dummy_size(size)) return result @@ -155,7 +152,6 @@ ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") self.page_for_size[size_class] = result - self.used_pages += 1 return result @@ -190,6 +186,7 @@ """For each object, if ok_to_free_func(obj) returns True, then free the object. """ + self.total_memory_used = r_uint(0) # # For each size class: size_class = self.small_request_threshold / WORD @@ -264,7 +261,6 @@ llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) pageaddr.address[0] = self.free_pages self.free_pages = pageaddr - self.used_pages -= 1 def walk_page(self, page, block_size, nblocks, ok_to_free_func): @@ -316,9 +312,12 @@ obj += block_size index -= 1 # - # Update the number of free objects. + # Update the number of free objects in the page. page.nfree = nblocks - surviving # + # Update the global total size of objects. + self.total_memory_used += surviving * block_size + # # Return the number of surviving objects. 
return surviving @@ -346,16 +345,3 @@ if isinstance(size, int): size = llmemory.sizeof(lltype.Char) * size return size - -def reserve_with_hash(result, size): - # XXX translation - # - # Custom hack for the hash - if (isinstance(size, llmemory.CompositeOffset) and - isinstance(size.offsets[-1], llmemory.ItemOffset) and - size.offsets[-1].TYPE == lltype.Signed): - size_of_int = llmemory.sizeof(lltype.Signed) - size = sum(size.offsets[1:-1], size.offsets[0]) - llarena.arena_reserve(result + size, size_of_int) - # - llarena.arena_reserve(result, size) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py Thu Sep 16 14:14:08 2010 @@ -336,6 +336,18 @@ assert hash == self.gc.identityhash(self.stackroots[-1]) self.stackroots.pop() + def test_memory_alignment(self): + A1 = lltype.GcArray(lltype.Char) + for i in range(50): + p1 = self.malloc(A1, i) + if i: + p1[i-1] = chr(i) + self.stackroots.append(p1) + self.gc.collect() + for i in range(1, 50): + p = self.stackroots[-50+i] + assert p[i-1] == chr(i) + class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Thu Sep 16 14:14:08 2010 @@ -39,27 +39,24 @@ # ac = ArenaCollection(arenasize, pagesize, 99) assert ac.num_uninitialized_pages == 0 - assert ac.used_pages == 0 + assert ac.total_memory_used == 0 # page = ac.allocate_new_page(5) checknewpage(page, 5) assert ac.num_uninitialized_pages == 2 assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[5] == page - assert ac.used_pages == 1 # page = ac.allocate_new_page(3) checknewpage(page, 3) assert ac.num_uninitialized_pages == 1 assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[3] == page - assert ac.used_pages == 2 # page = ac.allocate_new_page(4) checknewpage(page, 4) assert ac.num_uninitialized_pages == 0 assert ac.page_for_size[4] == page - assert ac.used_pages == 3 def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False): @@ -175,6 +172,7 @@ def test_malloc_common_case(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "#23..2 ") + assert ac.total_memory_used == 0 # so far obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) @@ -186,6 +184,7 @@ obj = ac.malloc(2*WORD); chkob(ac, 6, 0*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 6, 2*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 6, 4*WORD, obj) + assert ac.total_memory_used == 11*2*WORD def test_malloc_mixed_sizes(): pagesize = hdrsize + 7*WORD @@ -413,18 +412,21 @@ obj = ac.malloc(size_class * WORD) at = obj - ac._startpageaddr assert at not in live_objects - live_objects[at] = None + live_objects[at] = size_class * WORD # # Free half the objects, randomly ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5) ac.mass_free(ok_to_free) # # Check that we have seen all objects - assert dict.fromkeys(ok_to_free.seen) == live_objects + 
assert sorted(ok_to_free.seen) == sorted(live_objects) + surviving_total_size = 0 for at, freed in ok_to_free.seen.items(): if freed: del live_objects[at] + else: + surviving_total_size += live_objects[at] + assert ac.total_memory_used == surviving_total_size except DoneTesting: # the following output looks cool on a 112-character-wide terminal. print ac._startpageaddr.arena.usagemap - assert ac.used_pages == num_pages Modified: pypy/branch/gen2-gc/pypy/rpython/memory/support.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/support.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/support.py Thu Sep 16 14:14:08 2010 @@ -216,6 +216,24 @@ self.index_in_oldest = index + 1 return result + def foreach(self, callback, arg): + """Invoke 'callback(address, arg)' for all addresses in the deque. + Typically, 'callback' is a bound method and 'arg' can be None. + """ + chunk = self.oldest_chunk + index = self.index_in_oldest + while chunk is not self.newest_chunk: + while index < chunk_size: + callback(chunk.items[index], arg) + index += 1 + chunk = chunk.next + index = 0 + limit = self.index_in_newest + while index < limit: + callback(chunk.items[index], arg) + index += 1 + foreach._annspecialcase_ = 'specialize:arg(1)' + def delete(self): cur = self.oldest_chunk while cur: Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Thu Sep 16 14:14:08 2010 @@ -764,3 +764,7 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("Not supported") + + +class TestMiniMarkGC(TestSemiSpaceGC): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_support.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_support.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_support.py Thu Sep 16 14:14:08 2010 @@ -113,6 +113,27 @@ deque.append(x) expected.append(x) + def test_foreach(self): + AddressDeque = get_address_deque(10) + ll = AddressDeque() + for num_entries in range(30, -1, -1): + addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) + for i in range(num_entries)] + for a in addrs: + ll.append(a) + + seen = [] + def callback(addr, fortytwo): + assert fortytwo == 42 + seen.append(addr) + + ll.foreach(callback, 42) + assert seen == addrs + for a in addrs: + b = ll.popleft() + assert a == b + assert not ll.non_empty() + def test_stack_annotate(): AddressStack = get_address_stack(60) From antocuni at codespeak.net Thu Sep 16 14:41:42 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 14:41:42 +0200 (CEST) Subject: [pypy-svn] r77110 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . 
optimizeopt Message-ID: <20100916124142.BF3E5282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 14:41:40 2010 New Revision: 77110 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Log: (antocuni, david) use the new ResOperation interface a bit here and there until test_basic passes Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Thu Sep 16 14:41:40 2010 @@ -238,7 +238,7 @@ def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.opnum == rop.GUARD_VALUE - box = guard_value_op.args[0] + box = guard_value_op.getarg(0) try: i = guard_value_op.fail_args.index(box) except ValueError: @@ -546,7 +546,7 @@ # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. a FINISH descr = target_loop_token.finishdescr - args = [op.getarg(i) for i in range(op.numargs())] + args = op.sliceargs(0, op.numargs()) new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py Thu Sep 16 14:41:40 2010 @@ -191,7 +191,9 @@ def getlinks(self): boxes = {} for op in self.all_operations: - for box in op.args + [op.result]: + args = op.sliceargs(0, op.numargs()) + args.append(op.result) + for box in args: if getattr(box, 'is_box', False): boxes[box] = True links = {} Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py Thu Sep 16 14:41:40 2010 @@ -160,7 +160,8 @@ break else: # all constant arguments: we can constant-fold - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, op.opnum, argboxes, op.descr) self.set_constant_node(op.result, resbox.constbox()) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py Thu Sep 16 14:41:40 2010 @@ -45,7 +45,7 @@ op = self.lazy_setfields.get(descr, None) if op is None: return None - return self.getvalue(op.args[1]) + return self.getvalue(op.getarg(1)) return d.get(value, None) def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): @@ -167,9 +167,10 @@ # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" opnum = prevop.opnum + lastop_args = lastop.sliceargs(0, lastop.numargs()) if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) - and prevop.result not in lastop.args): + and prevop.result not in lastop_args): newoperations[-2] = lastop newoperations[-1] = prevop @@ -189,9 +190,9 @@ # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is stored # into a field of a non-virtual object. - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) assert not value.is_virtual() # it must be a non-virtual - fieldvalue = self.getvalue(op.args[1]) + fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py pendingfields.append((descr, value.box, @@ -207,11 +208,11 @@ if write: self.lazy_setfields_descrs.append(op.descr) else: - if self.getvalue(op1.args[0]) is not value: + if self.getvalue(op1.getarg(0)) is not value: self.force_lazy_setfield(op.descr) def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) self.force_lazy_setfield_if_necessary(op, value) # check if the field was read from another getfield_gc just before # or has been written to recently @@ -228,16 +229,16 @@ self.cache_field_value(op.descr, value, fieldvalue) def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) self.force_lazy_setfield_if_necessary(op, value, write=True) self.lazy_setfields[op.descr] = op # remember the result of future reads of the field self.cache_field_value(op.descr, value, fieldvalue, write=True) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - indexvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + indexvalue = self.getvalue(op.getarg(1)) fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) @@ -249,9 +250,9 @@ def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[2]) - indexvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(2)) + indexvalue = self.getvalue(op.getarg(1)) self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, write=True) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 16 14:41:40 2010 @@ -349,12 +349,12 @@ descr.store_final_boxes(op, newboxes) # if op.opnum == rop.GUARD_VALUE: - if self.getvalue(op.args[0]) in self.bool_boxes: + if 
self.getvalue(op.getarg(0)) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. # This is done after the operation is emitted, to let # store_final_boxes_in_guard set the guard_opnum field # of the descr to the original rop.GUARD_VALUE. - constvalue = op.args[1].getint() + constvalue = op.getarg(1).getint() if constvalue == 0: opnum = rop.GUARD_FALSE elif constvalue == 1: @@ -362,7 +362,7 @@ else: raise AssertionError("uh?") op.opnum = opnum - op.args = [op.args[0]] + op.args = [op.getarg(0)] else: # a real GUARD_VALUE. Make it use one counter per value. descr.make_a_counter_per_value(op) @@ -390,7 +390,8 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, op.opnum, argboxes, op.descr) self.make_constant(op.result, resbox.constbox()) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 16 14:41:40 2010 @@ -128,7 +128,8 @@ self.emit_operation(op) def optimize_CALL_PURE(self, op): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: @@ -136,7 +137,8 @@ self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + args = op.sliceargs(1, op.numargs()) + self.emit_operation(ResOperation(rop.CALL, args, op.result, op.descr)) def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) @@ -178,7 +180,8 @@ old_guard_op = self.optimizer.newoperations[value.last_guard_index] old_opnum = old_guard_op.opnum old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)] + # XXX XXX: implement it when the refactoring is complete + old_guard_op._args = [old_guard_op.getarg(0), op.getarg(1)] # hack hack hack. Change the guard_opnum on # old_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. @@ -217,7 +220,8 @@ # it was a guard_nonnull, which we replace with a # guard_nonnull_class. old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)] + # XXX XXX: implement it when the refactoring is complete + old_guard_op._args = [old_guard_op.getarg(0), op.getarg(1)] # hack hack hack. Change the guard_opnum on # old_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. 
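The change applied throughout the files in this revision is the same everywhere: stop reaching into the mutable 'args' list of a ResOperation and go through the small accessor API instead (getarg(), numargs(), setarglist(), and the sliceargs() helper; see the resoperation.py hunk below).  As a rough illustrative sketch only, not itself part of the checked-in diff:

    # old style: direct access to the list attribute
    box = op.args[0]
    tail = op.args[1:]
    op.args = newargs

    # new style: go through the ResOperation accessors
    box = op.getarg(0)
    tail = op.sliceargs(1, op.numargs())
    op.setarglist(newargs)

The few remaining direct writes use the temporary 'op._args' attribute and carry "XXX" comments until the refactoring is finished.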
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 16 14:41:40 2010 @@ -292,7 +292,7 @@ for i in range(len(specnodes)): value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) - op.args = exitargs[:] + op.setarglist(exitargs[:]) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): @@ -441,7 +441,7 @@ return # 0-length arraycopy descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - args = [op.getarg(i) for i in range(1, op.numargs())] + args = op.sliceargs(1, op.numargs()) self.emit_operation(ResOperation(rop.CALL, args, op.result, descr)) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Thu Sep 16 14:41:40 2010 @@ -2090,7 +2090,7 @@ op = self.history.operations[-1] assert op.opnum == rop.CALL resbox_as_const = resbox.constbox() - for i in op.numarg(): + for i in range(op.numargs()): if not isinstance(op.getarg(i), Const): break else: @@ -2101,7 +2101,7 @@ # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. op.opnum = rop.CALL_PURE - # XXX XXX replace... + # XXX XXX replace when the resoperation refactoring has been finished op._args = [resbox_as_const] + op._args return resbox Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Thu Sep 16 14:41:40 2010 @@ -21,6 +21,12 @@ self.result = result self.setdescr(descr) + # XXX: just for debugging during the refactoring, kill me + def __setattr__(self, attr, value): + if attr == 'args': + import pdb;pdb.set_trace() + object.__setattr__(self, attr, value) + def getarg(self, i): return self._args[i] @@ -30,6 +36,12 @@ def numargs(self): return len(self._args) + def setarglist(self, args): + self._args = args + + def sliceargs(self, start, stop): + return [self.getarg(i) for i in range(start, stop)] + def setdescr(self, descr): # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt # instance provided by the backend holding details about the type Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Thu Sep 16 14:41:40 2010 @@ -14,11 +14,13 @@ if op.opnum == rop.ARRAYCOPY: descr = op.args[0] assert isinstance(descr, AbstractDescr) - op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr) + args = op.sliceargs(1, op.numargs()) + op = ResOperation(rop.CALL, args, op.result, descr=descr) elif op.opnum == rop.CALL_PURE: - op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) + args = 
op.sliceargs(1, op.numargs()) + op = ResOperation(rop.CALL, args, op.result, op.descr) elif op.opnum == rop.VIRTUAL_REF: - op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) + op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) elif op.opnum == rop.VIRTUAL_REF_FINISH: return [] return [op] From arigo at codespeak.net Thu Sep 16 15:03:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 15:03:53 +0200 (CEST) Subject: [pypy-svn] r77111 - pypy/branch/gen2-gc/pypy/doc/discussion Message-ID: <20100916130353.AFA0E282C23@codespeak.net> Author: arigo Date: Thu Sep 16 15:03:52 2010 New Revision: 77111 Modified: pypy/branch/gen2-gc/pypy/doc/discussion/finalizer-order.txt Log: Add a paragraph about MiniMarkGC. Modified: pypy/branch/gen2-gc/pypy/doc/discussion/finalizer-order.txt ============================================================================== --- pypy/branch/gen2-gc/pypy/doc/discussion/finalizer-order.txt (original) +++ pypy/branch/gen2-gc/pypy/doc/discussion/finalizer-order.txt Thu Sep 16 15:03:52 2010 @@ -133,8 +133,8 @@ that doesn't change the state of an object, we don't follow its children recursively. -In practice we can encode the 4 states with a single extra bit in the -header: +In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode +the 4 states with a single extra bit in the header: ===== ============= ======== ==================== state is_forwarded? bit set? bit set in the copy? @@ -150,3 +150,17 @@ bit in the copy at the end, to clean up before the next collection (which means recursively bumping the state from 2 to 3 in the final loop). + +In the MiniMark GC, the objects don't move (apart from when they are +copied out of the nursery), but we use the flag GCFLAG_VISITED to mark +objects that survive, so we can also have a single extra bit for +finalizers: + + ===== ============== ============================ + state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING + ===== ============== ============================ + 0 no no + 1 no yes + 2 yes yes + 3 yes no + ===== ============= ============================ From antocuni at codespeak.net Thu Sep 16 15:04:27 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 15:04:27 +0200 (CEST) Subject: [pypy-svn] r77112 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . 
optimizeopt test Message-ID: <20100916130427.5C369282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 15:04:25 2010 New Revision: 77112 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_logger.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py Log: (david, antocuni): more ResOperation refactoring: now all the metainterp tests but ztranslation pass Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 16 15:04:25 2010 @@ -362,7 +362,8 @@ else: raise AssertionError("uh?") op.opnum = opnum - op.args = [op.getarg(0)] + # XXX XXX: fix me when the refactoring is complete + op._args = [op.getarg(0)] else: # a real GUARD_VALUE. Make it use one counter per value. descr.make_a_counter_per_value(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 16 15:04:25 2010 @@ -324,7 +324,7 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, + op1 = ResOperation(rop.SETFIELD_GC, op._args, None, descr = vrefinfo.descr_forced) self.optimize_SETFIELD_GC(op1) # - set 'virtual_token' to TOKEN_NONE Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Thu Sep 16 15:04:25 2010 @@ -2112,8 +2112,8 @@ op = self.history.operations.pop() assert op.opnum == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - greenargs = op.args[1:num_green_args+1] - args = op.args[num_green_args+1:] + greenargs = op.sliceargs(1, num_green_args+1) + args = op.sliceargs(num_green_args+1, op.numargs()) assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: @@ -2124,7 +2124,7 @@ warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) op.opnum = rop.CALL_ASSEMBLER - op.args = args + op.setarglist(args) op.descr = token self.history.operations.append(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py (original) +++ 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Thu Sep 16 15:04:25 2010 @@ -12,7 +12,7 @@ # change ARRAYCOPY to call, so we don't have to pass around # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. if op.opnum == rop.ARRAYCOPY: - descr = op.args[0] + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) args = op.sliceargs(1, op.numargs()) op = ResOperation(rop.CALL, args, op.result, descr=descr) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py Thu Sep 16 15:04:25 2010 @@ -26,7 +26,7 @@ def opboxes(operations): for op in operations: yield op.result - for box in op.args: + for box in op.sliceargs(0, op.numargs()): yield box def allboxes(): for box in self.inputargs: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_logger.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_logger.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_logger.py Thu Sep 16 15:04:25 2010 @@ -100,8 +100,8 @@ debug_merge_point("info") ''' loop, oloop = self.reparse(inp, check_equal=False) - assert loop.operations[0].args[0]._get_str() == 'info' - assert oloop.operations[0].args[0]._get_str() == 'info' + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert oloop.operations[0].getarg(0)._get_str() == 'info' def test_floats(self): inp = ''' Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py Thu Sep 16 15:04:25 2010 @@ -111,7 +111,7 @@ TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) loop = parse(x, None, {'func_ptr' : NULL}) - assert loop.operations[0].args[0].value == NULL + assert loop.operations[0].getarg(0).value == NULL def test_jump_target(): x = ''' @@ -136,7 +136,7 @@ f1 = float_add(f0, 3.5) ''' loop = parse(x) - assert isinstance(loop.operations[0].args[0], BoxFloat) + assert isinstance(loop.operations[0].getarg(0), BoxFloat) def test_debug_merge_point(): x = ''' @@ -147,10 +147,10 @@ debug_merge_point('(stuff) #1') ''' loop = parse(x) - assert loop.operations[0].args[0]._get_str() == 'info' - assert loop.operations[1].args[0]._get_str() == 'info' - assert loop.operations[2].args[0]._get_str() == " info" - assert loop.operations[3].args[0]._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert loop.operations[1].getarg(0)._get_str() == 'info' + assert loop.operations[2].getarg(0)._get_str() == " info" + assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1" def test_descr_with_obj_print(): From arigo at codespeak.net Thu Sep 16 15:04:45 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 15:04:45 +0200 (CEST) Subject: [pypy-svn] r77113 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . 
test Message-ID: <20100916130445.130AB282C23@codespeak.net> Author: arigo Date: Thu Sep 16 15:04:43 2010 New Revision: 77113 Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py (contents, props changed) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: * Tweak the small_request_threshold to ensure that unicode builders of the default size (100) are also small. * Write placeholders for all special functions used by gctransform. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 15:04:43 2010 @@ -89,8 +89,9 @@ "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger are just allocated with raw_malloc(). - "small_request_threshold": 32*WORD, + # that are larger are just allocated with raw_malloc(). The value + # chosen here is enough for a unicode string of length 100. + "small_request_threshold": 52*WORD, # Full collection threshold: after a major collection, we record # the total size consumed; and after every minor collection, if the @@ -355,6 +356,30 @@ # ---------- + # Other functions in the GC API + + def set_max_heap_size(self, size): + XXX + + def can_malloc_nonmovable(self): + XXX + + def can_move(self, addr): + """Overrides the parent can_move().""" + return self.is_in_nursery(addr) + + def shrink_array(self, addr, newsize): + XXX + + def malloc_varsize_nonmovable(self, typeid, length): + XXX + + def malloc_nonmovable(self, typeid, length, zero): + # helper for testing, same as GCBase.malloc + XXX + + + # ---------- # Simple helpers def get_type_id(self, obj): @@ -390,10 +415,6 @@ obj = llarena.getfakearenaaddress(obj) return obj.address[0] - def can_move(self, addr): - """Overrides the parent can_move().""" - return self.is_in_nursery(addr) - def get_total_memory_used(self): """Return the total memory used, not counting any object in the nursery: only objects in the ArenaCollection or raw-malloced. Added: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- (empty file) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Thu Sep 16 15:04:43 2010 @@ -0,0 +1,20 @@ +from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.memory.gc.minimark import MiniMarkGC + +# Note that most tests are in test_direct.py. + + +def test_stringbuilder_default_initsize_is_small(): + # Check that pypy.rlib.rstring.INIT_SIZE is short enough to let + # the allocated object be considered as a "small" object. + # Otherwise it would not be allocated in the nursery at all, + # which is kind of bad (and also prevents shrink_array() from + # being useful). 
+ from pypy.rlib.rstring import INIT_SIZE + from pypy.rpython.lltypesystem.rstr import STR, UNICODE + # + size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE)) + assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] + # + size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) + assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] From antocuni at codespeak.net Thu Sep 16 15:14:35 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 15:14:35 +0200 (CEST) Subject: [pypy-svn] r77114 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/llsupport backend/llsupport/test metainterp Message-ID: <20100916131435.E8ED0282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 15:14:34 2010 New Revision: 77114 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Log: (david, antocuni) make llsupport tests passing Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py Thu Sep 16 15:14:34 2010 @@ -563,8 +563,8 @@ continue # ---------- replace ConstPtrs with GETFIELD_RAW ---------- # xxx some performance issue here - for i in range(len(op.args)): - v = op.args[i] + for i in range(op.numargs()): + v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): addr = self.gcrefs.get_address_of_gcref(v.value) # ^^^even for non-movable objects, to record their presence @@ -574,22 +574,22 @@ newops.append(ResOperation(rop.GETFIELD_RAW, [ConstInt(addr)], box, self.single_gcref_descr)) - op.args[i] = box + op.setarg(i, box) # ---------- write barrier for SETFIELD_GC ---------- if op.opnum == rop.SETFIELD_GC: - v = op.args[1] + v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETFIELD_RAW, op.args, None, + self._gen_write_barrier(newops, op.getarg(0), v) + op = ResOperation(rop.SETFIELD_RAW, op._args, None, descr=op.descr) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.opnum == rop.SETARRAYITEM_GC: - v = op.args[2] + v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None, + self._gen_write_barrier(newops, op.getarg(0), v) + op = ResOperation(rop.SETARRAYITEM_RAW, op._args, None, descr=op.descr) # ---------- newops.append(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 16 15:14:34 2010 @@ -259,8 +259,8 @@ assert llop1.record == [] assert len(newops) == 1 assert newops[0].opnum == rop.COND_CALL_GC_WB - assert newops[0].args[0] == v_base - assert newops[0].args[1] == v_value + assert newops[0].getarg(0) == v_base + assert newops[0].getarg(1) == v_value assert 
newops[0].result is None wbdescr = newops[0].descr assert isinstance(wbdescr.jit_wb_if_flag, int) @@ -299,12 +299,13 @@ gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations) assert len(operations) == 2 assert operations[0].opnum == rop.GETFIELD_RAW - assert operations[0].args == [ConstInt(43)] + assert operations[0].getarg(0) == ConstInt(43) assert operations[0].descr == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) assert operations[1].opnum == rop.PTR_EQ - assert operations[1].args == [v_random_box, v_box] + assert operations[1].getarg(0) == v_random_box + assert operations[1].getarg(1) == v_box assert operations[1].result == v_result def test_rewrite_assembler_1_cannot_move(self): @@ -337,7 +338,8 @@ rgc.can_move = old_can_move assert len(operations) == 1 assert operations[0].opnum == rop.PTR_EQ - assert operations[0].args == [v_random_box, ConstPtr(s_gcref)] + assert operations[0].getarg(0) == v_random_box + assert operations[0].getarg(1) == ConstPtr(s_gcref) assert operations[0].result == v_result # check that s_gcref gets added to the list anyway, to make sure # that the GC sees it @@ -357,12 +359,13 @@ assert len(operations) == 2 # assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # assert operations[1].opnum == rop.SETFIELD_RAW - assert operations[1].args == [v_base, v_value] + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_value assert operations[1].descr == field_descr def test_rewrite_assembler_3(self): @@ -380,10 +383,12 @@ assert len(operations) == 2 # assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # assert operations[1].opnum == rop.SETARRAYITEM_RAW - assert operations[1].args == [v_base, v_index, v_value] + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_index + assert operations[1].getarg(2) == v_value assert operations[1].descr == array_descr Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Thu Sep 16 15:14:34 2010 @@ -794,7 +794,8 @@ # RPython-friendly print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: - print '\t', op.getopname(), self._dump_args(op.args), \ + args = op.sliceargs(0, op.numargs()) + print '\t', op.getopname(), self._dump_args(args), \ self._dump_box(op.result) def _dump_args(self, boxes): From antocuni at codespeak.net Thu Sep 16 15:20:25 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 15:20:25 +0200 (CEST) Subject: [pypy-svn] r77115 - pypy/branch/resoperation-refactoring/pypy/jit/backend/x86 Message-ID: <20100916132025.AF2CE282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 15:20:24 2010 New Revision: 77115 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Log: (david, antocuni) start refactoring 
ResOperation in the x86 backend Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py Thu Sep 16 15:20:24 2010 @@ -390,7 +390,7 @@ for op in operations: if op.opnum == rop.DEBUG_MERGE_POINT: - funcname = op.args[0]._get_str() + funcname = op.getarg(0)._get_str() break else: funcname = "" % len(self.loop_run_counters) @@ -725,7 +725,7 @@ def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): rl = result_loc.lowest8bits() - if isinstance(op.args[0], Const): + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value) else: @@ -756,7 +756,7 @@ def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): guard_opnum = guard_op.opnum - if isinstance(op.args[0], Const): + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: return self.implement_guard(guard_token, rev_cond) @@ -1120,7 +1120,7 @@ assert isinstance(baseofs, ImmedLoc) assert isinstance(scale_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value) - if op.args[2].type == FLOAT: + if op.getarg(2).type == FLOAT: self.mc.MOVSD(dest_addr, value_loc) else: if IS_X86_64 and scale_loc.value == 3: @@ -1244,8 +1244,8 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - if guard_op.args[0].type == FLOAT: - assert guard_op.args[1].type == FLOAT + if guard_op.getarg(0).type == FLOAT: + assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: self.mc.CMP(locs[0], locs[1]) @@ -1636,8 +1636,8 @@ assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value - if isinstance(op.args[0], Const): - x = imm(op.args[0].getint()) + if isinstance(op.getarg(0), Const): + x = imm(op.getarg(0).getint()) else: x = arglocs[1] if x is eax: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Thu Sep 16 15:20:24 2010 @@ -267,7 +267,7 @@ else: loop_consts = {} for i in range(len(inputargs)): - if inputargs[i] is jump.args[i]: + if inputargs[i] is jump.getarg(i): loop_consts[inputargs[i]] = i return loop_consts @@ -360,7 +360,7 @@ if (operations[i + 1].opnum != rop.GUARD_TRUE and operations[i + 1].opnum != rop.GUARD_FALSE): return False - if operations[i + 1].args[0] is not op.result: + if operations[i + 1].getarg(0) is not op.result: return False if (self.longevity[op.result][1] > i + 1 or op.result in operations[i + 1].fail_args): @@ -402,7 +402,8 @@ op = operations[i] if op.result is not None: start_live[op.result] = i - for arg in op.args: + for j in range(op.numargs()): + arg = op.getarg(j) if isinstance(arg, Box): if arg not in start_live: print "Bogus arg in operation %d at %d" % (op.opnum, i) @@ -432,9 +433,9 @@ return self.rm.loc(v) def _consider_guard(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) self.perform_guard(op, [loc], 
None) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) consider_guard_true = _consider_guard consider_guard_false = _consider_guard @@ -453,7 +454,7 @@ self.perform_guard(op, [], None) def consider_guard_exception(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() loc1 = self.rm.force_allocate_reg(box, op.args) if op.result in self.longevity: @@ -469,25 +470,25 @@ consider_guard_overflow = consider_guard_no_exception def consider_guard_value(self, op): - x = self.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + x = self.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) self.possibly_free_vars(op.args) def consider_guard_class(self, op): - assert isinstance(op.args[0], Box) - x = self.rm.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + assert isinstance(op.getarg(0), Box) + x = self.rm.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) self.rm.possibly_free_vars(op.args) consider_guard_nonnull_class = consider_guard_class def _consider_binop_part(self, op): - x = op.args[0] - argloc = self.loc(op.args[1]) + x = op.getarg(0) + argloc = self.loc(op.getarg(1)) loc = self.rm.force_result_in_reg(op.result, x, op.args) - self.rm.possibly_free_var(op.args[1]) + self.rm.possibly_free_var(op.getarg(1)) return loc, argloc def _consider_binop(self, op): @@ -510,17 +511,17 @@ consider_int_add_ovf = _consider_binop_with_guard def consider_int_neg(self, op): - res = self.rm.force_result_in_reg(op.result, op.args[0]) + res = self.rm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [res], res) consider_int_invert = consider_int_neg def consider_int_lshift(self, op): - if isinstance(op.args[1], Const): - loc2 = self.rm.convert_to_imm(op.args[1]) + if isinstance(op.getarg(1), Const): + loc2 = self.rm.convert_to_imm(op.getarg(1)) else: - loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) - loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args) + loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) + loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), op.args) self.Perform(op, [loc1, loc2], loc1) self.rm.possibly_free_vars(op.args) @@ -528,8 +529,8 @@ consider_uint_rshift = consider_int_lshift def _consider_int_div_or_mod(self, op, resultreg, trashreg): - l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax) - l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) + l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax) + l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg) # the register (eax or edx) not holding what we are looking for # will be just trash after that operation @@ -552,8 +553,8 @@ consider_uint_floordiv = consider_int_floordiv def _consider_compop(self, op, guard_op): - vx = op.args[0] - vy = op.args[1] + vx = op.getarg(0) + vy = op.getarg(1) arglocs = [self.loc(vx), self.loc(vy)] if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or isinstance(vx, Const) or isinstance(vy, Const)): @@ -582,8 +583,8 @@ consider_ptr_ne = _consider_compop def _consider_float_op(self, op): - loc1 = self.xrm.loc(op.args[1]) - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args) + loc1 = self.xrm.loc(op.getarg(1)) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), op.args) 
self.Perform(op, [loc0, loc1], loc0) self.xrm.possibly_free_vars(op.args) @@ -593,9 +594,9 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args, + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), op.args, imm_fine=False) - loc1 = self.xrm.loc(op.args[1]) + loc1 = self.xrm.loc(op.getarg(1)) arglocs = [loc0, loc1] self.xrm.possibly_free_vars(op.args) if guard_op is None: @@ -612,26 +613,26 @@ consider_float_ge = _consider_float_cmp def consider_float_neg(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_float_abs(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_float_to_int(self, op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False) + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False) loc1 = self.rm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_int_to_float(self, op): - loc0 = self.rm.loc(op.args[0]) + loc0 = self.rm.loc(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None @@ -682,11 +683,11 @@ def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args) + loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) # ^^^ we force loc_newvalue in a reg (unless it's a Const), # because it will be needed anyway by the following setfield_gc. # It avoids loading it twice from the memory. 
- loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, + loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), op.args, imm_fine=False) arglocs = [loc_base, loc_newvalue] # add eax, ecx and edx as extra "arguments" to ensure they are @@ -733,7 +734,7 @@ return self._call(op, arglocs) def consider_new_with_vtable(self, op): - classint = op.args[0].getint() + classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): self._fastpath_malloc(op, descrsize) @@ -742,34 +743,34 @@ else: args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) def consider_newstr(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newstr is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code) assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, 0, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0), op.result) def consider_newunicode(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newunicode is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) if itemsize == 4: - return self._malloc_varsize(ofs_items, ofs, 2, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0), op.result) elif itemsize == 2: - return self._malloc_varsize(ofs_items, ofs, 1, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0), op.result) else: assert False, itemsize @@ -801,13 +802,13 @@ # framework GC args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) # boehm GC (XXX kill the following code at some point) scale_of_field, basesize, ofs_length, _ = ( self._unpack_arraydescr(op.descr)) return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.args[0], op.result) + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -835,8 +836,8 @@ need_lower_byte = True else: need_lower_byte = False - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - value_loc = self.make_sure_var_in_reg(op.args[1], op.args, + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) + value_loc = self.make_sure_var_in_reg(op.getarg(1), op.args, need_lower_byte=need_lower_byte) self.possibly_free_vars(op.args) self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc]) @@ -844,9 +845,9 @@ consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args, + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) + value_loc = 
self.rm.make_sure_var_in_reg(op.getarg(2), op.args, need_lower_byte=True) self.rm.possibly_free_vars(op.args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc]) @@ -855,14 +856,14 @@ def consider_setarrayitem_gc(self, op): scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) if scale == 0: need_lower_byte = True else: need_lower_byte = False - value_loc = self.make_sure_var_in_reg(op.args[2], op.args, + value_loc = self.make_sure_var_in_reg(op.getarg(2), op.args, need_lower_byte=need_lower_byte) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) self.possibly_free_vars(op.args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc, imm(scale), imm(ofs)]) @@ -871,7 +872,7 @@ def consider_getfield_gc(self, op): ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) self.rm.possibly_free_vars(op.args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc) @@ -882,8 +883,8 @@ def consider_getarrayitem_gc(self, op): scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) self.rm.possibly_free_vars(op.args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc) @@ -893,8 +894,8 @@ def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register - argloc = self.loc(op.args[0]) - self.rm.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) if guard_op is not None: self.perform_with_guard(op, guard_op, [argloc], None) else: @@ -904,14 +905,14 @@ consider_int_is_zero = consider_int_is_true def consider_same_as(self, op): - argloc = self.loc(op.args[0]) - self.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.possibly_free_var(op.getarg(0)) resloc = self.force_allocate_reg(op.result) self.Perform(op, [argloc], resloc) #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) self.rm.possibly_free_vars(op.args) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc], result_loc) @@ -922,14 +923,14 @@ arraydescr = op.descr assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) self.rm.possibly_free_vars(op.args) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) self.rm.possibly_free_vars(op.args) result_loc = 
self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc], result_loc) From arigo at codespeak.net Thu Sep 16 15:46:42 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 15:46:42 +0200 (CEST) Subject: [pypy-svn] r77116 - pypy/branch/gen2-gc/pypy/rpython/memory/gc Message-ID: <20100916134642.C6BED282C23@codespeak.net> Author: arigo Date: Thu Sep 16 15:46:41 2010 New Revision: 77116 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: Weakrefs. Passes the corresponding tests of test_gc.py. Code mostly copied from semispace.py and generation.py. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 15:46:41 2010 @@ -149,9 +149,16 @@ self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # - # A list of all objects with finalizers. + # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # + # Two lists of the objects with weakrefs. No weakref can be an + # old object weakly pointing to a young object: indeed, weakrefs + # are immutable so they cannot point to an object that was + # created after it. + self.young_objects_with_weakrefs = self.AddressStack() + self.old_objects_with_weakrefs = self.AddressStack() + # # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). @@ -171,7 +178,6 @@ def malloc_fixedsize_clear(self, typeid, size, can_collect=True, needs_finalizer=False, contains_weakptr=False): ll_assert(can_collect, "!can_collect") - assert not contains_weakptr # XXX size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size rawtotalsize = llmemory.raw_malloc_usage(totalsize) @@ -180,21 +186,22 @@ # The following check should be constant-folded. if needs_finalizer: ll_assert(not contains_weakptr, - "needs_finalizer and contains_weakptr both specified") + "'needs_finalizer' and 'contains_weakptr' both specified") result = self.malloc_with_finalizer(typeid, totalsize) # # If totalsize is greater than small_request_threshold, ask for # a rawmalloc. The following check should be constant-folded. elif rawtotalsize > self.small_request_threshold: + ll_assert(not contains_weakptr, + "'contains_weakptr' specified for a large object") result = self._external_malloc(typeid, totalsize) # else: # If totalsize is smaller than minimal_size_in_nursery, round it # up. The following check should also be constant-folded. - if (rawtotalsize < - llmemory.raw_malloc_usage(self.minimal_size_in_nursery)): - totalsize = llmemory.raw_malloc_usage( - self.minimal_size_in_nursery) + min_size = llmemory.raw_malloc_usage(self.minimal_size_in_nursery) + if rawtotalsize < min_size: + totalsize = rawtotalsize = min_size # # Get the memory from the nursery. If there is not enough space # there, do a collect first. @@ -206,6 +213,10 @@ # Build the object. llarena.arena_reserve(result, totalsize) self.init_gc_object(result, typeid, flags=0) + # + # If it is a weakref, record it (check constant-folded). + if contains_weakptr: + self.young_objects_with_weakrefs.append(result+size_gc_header) # return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) @@ -408,7 +419,9 @@ "odd-valued (i.e. 
tagged) pointer unexpected here") return self.nursery <= addr < self.nursery_top - def is_forwarded_marker(self, tid): + def is_forwarded(self, obj): + assert self.is_in_nursery(obj) + tid = self.header(obj).tid return isinstance(tid, int) and tid == FORWARDED_MARKER def get_forwarding_address(self, obj): @@ -551,13 +564,17 @@ # We proceed until 'old_objects_pointing_to_young' is empty. self.collect_oldrefs_to_nursery() # + # Now all live nursery objects should be out. Update the + # young weakrefs' targets. + if self.young_objects_with_weakrefs.length() > 0: + self.invalidate_young_weakrefs() + # # Clear this mapping. if self.young_objects_shadows.length() > 0: self.young_objects_shadows.clear() # - # Now all live nursery objects should be out, and the rest dies. - # Fill the whole nursery with zero and reset the current nursery - # pointer. + # All live nursery objects are out, and the rest dies. Fill + # the whole nursery with zero and reset the current nursery pointer. llarena.arena_reset(self.nursery, self.nursery_size, 2) self.nursery_next = self.nursery # @@ -606,7 +623,7 @@ return # # If 'obj' was already forwarded, change it to its forwarding address. - if self.is_forwarded_marker(self.header(obj).tid): + if self.is_forwarded(obj): root.address[0] = self.get_forwarding_address(obj) #print '(already forwarded)' return @@ -692,6 +709,10 @@ # self.objects_to_trace.delete() # + # Weakref support: clear the weak pointers to dying objects + if self.old_objects_with_weakrefs.non_empty(): + self.invalidate_old_weakrefs() + # # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. self.free_unvisited_rawmalloc_objects() @@ -952,6 +973,53 @@ self.visit_all_objects() + # ---------- + # Weakrefs + + # The code relies on the fact that no weakref can be an old object + # weakly pointing to a young object. Indeed, weakrefs are immutable + # so they cannot point to an object that was created after it. + def invalidate_young_weakrefs(self): + """Called during a nursery collection.""" + # walk over the list of objects that contain weakrefs and are in the + # nursery. 
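# Illustrative aside: the weakref bookkeeping added in this revision boils
# down to keeping two lists and moving entries between them at collection
# time.  The standalone sketch below is simplified; the survived/cleared
# callables stand in for the forwarding-pointer logic of the real collector.
young_objects_with_weakrefs = []    # weakrefs allocated in the nursery
old_objects_with_weakrefs = []      # weakrefs that survived a minor collection

def sketch_minor_collection_weakrefs(weakref_survived, target_survived,
                                     clear_target):
    for wr in young_objects_with_weakrefs:
        if not weakref_survived(wr):
            continue                                  # the weakref itself died
        if target_survived(wr):
            old_objects_with_weakrefs.append(wr)      # keep tracking it
        else:
            clear_target(wr)                          # invalidate the weakref
    del young_objects_with_weakrefs[:]
# A weakref is immutable, so it can never be older than the object it points
# to; hence no old weakref points to a young object, and a minor collection
# only ever needs to walk the young list.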
if the object it references survives then update the + # weakref; otherwise invalidate the weakref + while self.young_objects_with_weakrefs.non_empty(): + obj = self.young_objects_with_weakrefs.pop() + if not self.is_forwarded(obj): + continue # weakref itself dies + obj = self.get_forwarding_address(obj) + offset = self.weakpointer_offset(self.get_type_id(obj)) + pointing_to = (obj + offset).address[0] + if self.is_in_nursery(pointing_to): + if self.is_forwarded(pointing_to): + (obj + offset).address[0] = self.get_forwarding_address( + pointing_to) + else: + (obj + offset).address[0] = llmemory.NULL + continue # no need to remember this weakref any longer + self.old_objects_with_weakrefs.append(obj) + + + def invalidate_old_weakrefs(self): + """Called during a major collection.""" + # walk over list of objects that contain weakrefs + # if the object it references does not survive, invalidate the weakref + new_with_weakref = self.AddressStack() + while self.old_objects_with_weakrefs.non_empty(): + obj = self.old_objects_with_weakrefs.pop() + if self.header(obj).tid & GCFLAG_VISITED == 0: + continue # weakref itself dies + offset = self.weakpointer_offset(self.get_type_id(obj)) + pointing_to = (obj + offset).address[0] + if self.header(pointing_to).tid & GCFLAG_VISITED: + new_with_weakref.append(obj) + else: + (obj + offset).address[0] = llmemory.NULL + self.old_objects_with_weakrefs.delete() + self.old_objects_with_weakrefs = new_with_weakref + + # ____________________________________________________________ # For testing, a simple implementation of ArenaCollection. From cfbolz at codespeak.net Thu Sep 16 15:53:02 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 16 Sep 2010 15:53:02 +0200 (CEST) Subject: [pypy-svn] r77117 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100916135302.2958A282C23@codespeak.net> Author: cfbolz Date: Thu Sep 16 15:53:00 2010 New Revision: 77117 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: grrr, reshuffling again Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 16 15:53:00 2010 @@ -377,8 +377,6 @@ pass # mainly for tests def get_subclass_of_correct_size(space, cls, supercls, w_type): - if not hasattr(supercls, "_init_empty"): - return supercls # not a mapdict class assert space.config.objspace.std.withmapdict map = w_type.terminator classes = memo_get_subclass_of_correct_size(space, supercls) @@ -398,6 +396,8 @@ try: return _subclass_cache[key] except KeyError: + if not hasattr(supercls, "_init_empty"): + result = [supercls] * NUM_SUBCLASSES # not a mapdict if (not issubclass(supercls, W_ObjectObject) or hasattr(supercls, '__del__')): class subcls(ObjectMixin, supercls): From arigo at codespeak.net Thu Sep 16 16:00:11 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 16:00:11 +0200 (CEST) Subject: [pypy-svn] r77118 - in pypy/branch/gen2-gc/pypy/rpython/memory: gc test Message-ID: <20100916140011.86799282C23@codespeak.net> Author: arigo Date: Thu Sep 16 16:00:09 2010 New Revision: 77118 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Log: shrink_array(). 
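The shrink_array() operation added below supports rgc.ll_shrink_array(): trimming an over-allocated varsized object down to a smaller length. In this collector only objects still in the nursery can be shrunk in place; their recorded length is simply reduced, so they consume less memory when they are later moved out of the nursery. A minimal sketch of the caller-side contract, with a hypothetical copy helper:

    def shrink_or_copy(gc, obj, smaller_length):
        if gc.shrink_array(obj, smaller_length):
            return obj                                  # shrunk in place
        return copy_with_length(obj, smaller_length)    # assumed fallback helper

For objects outside the nursery shrink_array() returns False and the caller falls back to allocating a smaller copy, which is what the test below distinguishes via the GC_CAN_SHRINK_ARRAY / GC_CAN_SHRINK_BIG_ARRAY flags.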
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 16:00:09 2010 @@ -375,12 +375,30 @@ def can_malloc_nonmovable(self): XXX - def can_move(self, addr): + def can_move(self, obj): """Overrides the parent can_move().""" - return self.is_in_nursery(addr) + return self.is_in_nursery(obj) + + + def shrink_array(self, obj, smallerlength): + # + # Only objects in the nursery can be "resized". Resizing them + # means recording that they have a smaller size, so that when + # moved out of the nursery, they will consume less memory. + if not self.is_in_nursery(obj): + return False + # + size_gc_header = self.gcheaderbuilder.size_gc_header + typeid = self.get_type_id(obj) + totalsmallersize = ( + size_gc_header + self.fixed_size(typeid) + + self.varsize_item_sizes(typeid) * smallerlength) + llarena.arena_shrink_obj(obj - size_gc_header, totalsmallersize) + # + offset_to_length = self.varsize_offset_to_length(typeid) + (obj + offset_to_length).signed[0] = smallerlength + return True - def shrink_array(self, addr, newsize): - XXX def malloc_varsize_nonmovable(self, typeid, length): XXX Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Thu Sep 16 16:00:09 2010 @@ -28,6 +28,7 @@ GC_CAN_MOVE = False GC_CANNOT_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -474,28 +475,27 @@ def test_shrink_array(self): from pypy.rpython.lltypesystem.rstr import STR - GC_CAN_SHRINK_ARRAY = self.GC_CAN_SHRINK_ARRAY - def f(n, m): + def f(n, m, gc_can_shrink_array): ptr = lltype.malloc(STR, n) ptr.hash = 0x62 ptr.chars[0] = 'A' ptr.chars[1] = 'B' ptr.chars[2] = 'C' ptr2 = rgc.ll_shrink_array(ptr, 2) - assert (ptr == ptr2) == GC_CAN_SHRINK_ARRAY + assert (ptr == ptr2) == gc_can_shrink_array rgc.collect() return ( ord(ptr2.chars[0]) + (ord(ptr2.chars[1]) << 8) + (len(ptr2.chars) << 16) + (ptr2.hash << 24)) - assert self.interpret(f, [3, 0]) == 0x62024241 - # don't test with larger numbers of top of the Hybrid GC, because - # the default settings make it a too-large varsized object that - # gets allocated outside the semispace - if not isinstance(self, TestHybridGC): - assert self.interpret(f, [12, 0]) == 0x62024241 + flag = self.GC_CAN_SHRINK_ARRAY + assert self.interpret(f, [3, 0, flag]) == 0x62024241 + # with larger numbers, it gets allocated outside the semispace + # with some GCs. 
+ flag = self.GC_CAN_SHRINK_BIG_ARRAY + assert self.interpret(f, [12, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -630,6 +630,7 @@ GC_CAN_MOVE = True GC_CANNOT_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = True + GC_CAN_SHRINK_BIG_ARRAY = True class TestGrowingSemiSpaceGC(TestSemiSpaceGC): GC_PARAMS = {'space_size': 16*WORD} @@ -641,6 +642,7 @@ from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass GC_PARAMS = {'space_size': 65536+16384} GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") @@ -651,6 +653,7 @@ class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_SHRINK_BIG_ARRAY = False def test_ref_from_rawmalloced_to_regular(self): import gc @@ -768,3 +771,4 @@ class TestMiniMarkGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_CAN_SHRINK_BIG_ARRAY = False From arigo at codespeak.net Thu Sep 16 16:17:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 16:17:15 +0200 (CEST) Subject: [pypy-svn] r77119 - in pypy/branch/gen2-gc/pypy/rpython/memory: gc test Message-ID: <20100916141715.17837282C23@codespeak.net> Author: arigo Date: Thu Sep 16 16:17:13 2010 New Revision: 77119 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Log: Malloc_nonmovable. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 16:17:13 2010 @@ -345,6 +345,7 @@ if rawtotalsize <= self.small_request_threshold: # # Ask the ArenaCollection to do the malloc. 
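# Background for this hunk: in this collector an allocation request of at
# most 'small_request_threshold' bytes is served from the page-based
# ArenaCollection ('self.ac'), while anything larger is raw-malloced and
# tracked in 'rawmalloced_objects'.  A toy version of that routing decision
# (illustrative only; 'raw_malloc' here stands in for the real low-level call):
def toy_malloc(ac, rawmalloced_objects, raw_malloc, totalsize,
               small_request_threshold):
    if totalsize <= small_request_threshold:
        return ac.malloc(totalsize)          # small: shared arena pages
    result = raw_malloc(totalsize)           # large: its own raw allocation
    rawmalloced_objects.append(result)       # remembered for major collections
    return result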
+ totalsize = llarena.round_up_for_allocation(totalsize) result = self.ac.malloc(totalsize) # else: @@ -373,7 +374,7 @@ XXX def can_malloc_nonmovable(self): - XXX + return True def can_move(self, obj): """Overrides the parent can_move().""" @@ -400,12 +401,36 @@ return True + def malloc_fixedsize_nonmovable(self, typeid): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.fixed_size(typeid) + # + result = self._malloc_nonmovable(typeid, totalsize) + return result + size_gc_header + def malloc_varsize_nonmovable(self, typeid, length): - XXX + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + self.fixed_size(typeid) + itemsize = self.varsize_item_sizes(typeid) + offset_to_length = self.varsize_offset_to_length(typeid) + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError + # + result = self._malloc_nonmovable(typeid, totalsize) + obj = result + size_gc_header + (obj + offset_to_length).signed[0] = length + return obj def malloc_nonmovable(self, typeid, length, zero): # helper for testing, same as GCBase.malloc - XXX + if self.is_varsize(typeid): + obj = self.malloc_varsize_nonmovable(typeid, length) + else: + obj = self.malloc_fixedsize_nonmovable(typeid) + return obj # ---------- Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Thu Sep 16 16:17:13 2010 @@ -772,3 +772,4 @@ class TestMiniMarkGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False + GC_CANNOT_MALLOC_NONMOVABLE = False From cfbolz at codespeak.net Thu Sep 16 16:53:17 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 16 Sep 2010 16:53:17 +0200 (CEST) Subject: [pypy-svn] r77120 - in pypy/branch/better-map-instances/pypy: interpreter objspace/std objspace/std/test Message-ID: <20100916145317.25842282C23@codespeak.net> Author: cfbolz Date: Thu Sep 16 16:53:15 2010 New Revision: 77120 Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/objspace.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: maybe this has a chance to work Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Thu Sep 16 16:53:15 2010 @@ -193,14 +193,15 @@ def add(Proto): for key, value in Proto.__dict__.items(): - if not key.startswith('__') or key == '__del__': + if not key.startswith('__') or key in ['__del__', '_mixin_']: if hasattr(value, "func_name"): value = func_with_new_name(value, value.func_name) body[key] = value if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject + from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin add(BaseMapdictObject) + add(ObjectMixin) features = () if "user" in features: # generic feature needed by all subcls Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py 
============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Thu Sep 16 16:53:15 2010 @@ -272,7 +272,9 @@ SLOTS_STARTING_FROM = 3 -class BaseMapdictObject(W_Root): # slightly evil to make it inherit from W_Root +class BaseMapdictObject: # slightly evil to make it inherit from W_Root + _mixin_ = True # XXX hack hack hack + def _init_empty(self, map): raise NotImplementedError("abstract base class") @@ -373,13 +375,13 @@ def _set_mapdict_storage(self, storage): self.storage = storage -class Object(ObjectMixin, BaseMapdictObject): +class Object(ObjectMixin, BaseMapdictObject, W_Root): pass # mainly for tests -def get_subclass_of_correct_size(space, cls, supercls, w_type): +def get_subclass_of_correct_size(space, cls, w_type): assert space.config.objspace.std.withmapdict map = w_type.terminator - classes = memo_get_subclass_of_correct_size(space, supercls) + classes = memo_get_subclass_of_correct_size(space, cls) size = map.size_estimate() if not size: size = 1 @@ -396,18 +398,10 @@ try: return _subclass_cache[key] except KeyError: - if not hasattr(supercls, "_init_empty"): - result = [supercls] * NUM_SUBCLASSES # not a mapdict - if (not issubclass(supercls, W_ObjectObject) or - hasattr(supercls, '__del__')): - class subcls(ObjectMixin, supercls): - pass - subcls.__name__ = supercls.__name__ + "Concrete" - result = [subcls] * NUM_SUBCLASSES - else: - result = [] - for i in range(1, NUM_SUBCLASSES+1): - result.append(_make_subclass_size_n(supercls, i)) + assert not hasattr(supercls, "__del__") + result = [] + for i in range(1, NUM_SUBCLASSES+1): + result.append(_make_subclass_size_n(supercls, i)) _subclass_cache[key] = result return result memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" @@ -418,7 +412,7 @@ rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) - class subcls(supercls): + class subcls(ObjectMixin, BaseMapdictObject, supercls): def _init_empty(self, map): from pypy.rlib.debug import make_sure_not_resized for i in rangen: Modified: pypy/branch/better-map-instances/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/objspace.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/objspace.py Thu Sep 16 16:53:15 2010 @@ -23,6 +23,7 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.noneobject import W_NoneObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.ropeobject import W_RopeObject from pypy.objspace.std.iterobject import W_SeqIterObject from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject @@ -318,12 +319,14 @@ w_subtype = w_type.check_user_subclass(w_subtype) if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) - if self.config.objspace.std.withmapdict: + if (self.config.objspace.std.withmapdict and cls is W_ObjectObject + and not w_subtype.needsdel): from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, cls, subcls, w_subtype) + 
subcls = get_subclass_of_correct_size(self, cls, w_subtype) + else: + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, + w_subtype.needsdel, w_subtype.weakrefable) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Thu Sep 16 16:53:15 2010 @@ -360,10 +360,7 @@ def test_specialized_class(): from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib import rerased - class Object(BaseMapdictObject, W_ObjectObject): # slightly evil - class typedef: - hasdict = False - classes = memo_get_subclass_of_correct_size(space, Object) + classes = memo_get_subclass_of_correct_size(space, W_ObjectObject) w1 = W_Root() w2 = W_Root() w3 = W_Root() From antocuni at codespeak.net Thu Sep 16 16:57:06 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 16:57:06 +0200 (CEST) Subject: [pypy-svn] r77121 - in pypy/branch/resoperation-refactoring/pypy/jit/backend: llsupport test x86 Message-ID: <20100916145706.98079282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 16:57:05 2010 New Revision: 77121 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Log: (david, antocuni) make x86 tests passing Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/regalloc.py Thu Sep 16 16:57:05 2010 @@ -81,6 +81,10 @@ for v in vars: self.possibly_free_var(v) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + self.possibly_free_var(op.getarg(i)) + def _check_invariants(self): if not we_are_translated(): # make sure no duplicates Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py Thu Sep 16 16:57:05 2010 @@ -86,7 +86,7 @@ def process_operation(self, s, op, names, subops): args = [] - for v in op.args: + for v in op.sliceargs(0, op.numargs()): if v in names: args.append(names[v]) ## elif isinstance(v, ConstAddr): @@ -129,7 +129,7 @@ def print_loop_prebuilt(ops): for op in ops: - for arg in op.args: + for arg in op.sliceargs(0, op.numargs()): if isinstance(arg, ConstPtr): if arg not in names: writevar(arg, 'const_ptr') @@ -553,7 +553,7 @@ endvars = [] used_later = {} for op in loop.operations: - for v in op.args: + for v in op.sliceargs(0, op.numargs()): used_later[v] = True for v in startvars: if v not in used_later: @@ -581,7 +581,7 @@ return self.should_fail_by.fail_args else: assert self.should_fail_by.opnum == rop.FINISH - return self.should_fail_by.args + return self.should_fail_by.sliceargs(0, 
self.should_fail_by.numargs()) def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Thu Sep 16 16:57:05 2010 @@ -234,6 +234,12 @@ else: self.rm.possibly_free_var(var) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + var = op.getarg(i) + if var is not None: # xxx kludgy + self.possibly_free_var(var) + def possibly_free_vars(self, vars): for var in vars: if var is not None: # xxx kludgy @@ -376,7 +382,7 @@ self.xrm.position = i if op.has_no_side_effect() and op.result not in self.longevity: i += 1 - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) continue if self.can_merge_with_next_guard(op, i, operations): oplist_with_guard[op.opnum](self, op, operations[i + 1]) @@ -443,12 +449,12 @@ consider_guard_isnull = _consider_guard def consider_finish(self, op): - locs = [self.loc(v) for v in op.args] - locs_are_ref = [v.type == REF for v in op.args] + locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] + locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())] fail_index = self.assembler.cpu.get_fail_descr_number(op.descr) self.assembler.generate_failure(fail_index, locs, self.exc, locs_are_ref) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) @@ -456,14 +462,15 @@ def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() - loc1 = self.rm.force_allocate_reg(box, op.args) + args = op.sliceargs(0, op.numargs()) + loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used - resloc = self.rm.force_allocate_reg(op.result, op.args + [box]) + resloc = self.rm.force_allocate_reg(op.result, args + [box]) else: resloc = None self.perform_guard(op, [loc, loc1], resloc) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(box) consider_guard_no_overflow = consider_guard_no_exception @@ -473,21 +480,22 @@ x = self.make_sure_var_in_reg(op.getarg(0)) y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_class(self, op): assert isinstance(op.getarg(0), Box) x = self.rm.make_sure_var_in_reg(op.getarg(0)) y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_guard_nonnull_class = consider_guard_class def _consider_binop_part(self, op): x = op.getarg(0) argloc = self.loc(op.getarg(1)) - loc = self.rm.force_result_in_reg(op.result, x, op.args) + args = op.sliceargs(0, op.numargs()) + loc = self.rm.force_result_in_reg(op.result, x, args) self.rm.possibly_free_var(op.getarg(1)) return loc, argloc @@ -521,9 +529,10 @@ loc2 = self.rm.convert_to_imm(op.getarg(1)) else: loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) - loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), op.args) + args = op.sliceargs(0, op.numargs()) + loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc1, loc2], loc1) - self.rm.possibly_free_vars(op.args) + 
self.rm.possibly_free_vars_for_op(op) consider_int_rshift = consider_int_lshift consider_uint_rshift = consider_int_lshift @@ -539,7 +548,7 @@ assert l0 is eax assert l1 is ecx assert l2 is resultreg - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(tmpvar) def consider_int_mod(self, op): @@ -561,9 +570,10 @@ pass else: arglocs[0] = self.rm.make_sure_var_in_reg(vx) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + self.rm.possibly_free_vars(args) if guard_op is None: - loc = self.rm.force_allocate_reg(op.result, op.args, + loc = self.rm.force_allocate_reg(op.result, args, need_lower_byte=True) self.Perform(op, arglocs, loc) else: @@ -584,9 +594,10 @@ def _consider_float_op(self, op): loc1 = self.xrm.loc(op.getarg(1)) - loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), op.args) + args = op.sliceargs(0, op.numargs()) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc0, loc1], loc0) - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) consider_float_add = _consider_float_op consider_float_sub = _consider_float_op @@ -594,11 +605,12 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), op.args, + args = op.sliceargs(0, op.numargs()) + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) loc1 = self.xrm.loc(op.getarg(1)) arglocs = [loc0, loc1] - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) if guard_op is None: res = self.rm.force_allocate_reg(op.result, need_lower_byte=True) self.Perform(op, arglocs, res) @@ -653,9 +665,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.descr assert isinstance(calldescr, BaseCallDescr) - assert len(calldescr.arg_classes) == len(op.args) - 1 + assert len(calldescr.arg_classes) == op.numargs() - 1 size = calldescr.get_result_size(self.translate_support_code) - self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args], + self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_not_forced_op) def consider_call(self, op): @@ -673,21 +685,22 @@ size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: - self.rm._sync_var(op.args[vable_index]) - vable = self.fm.loc(op.args[vable_index]) + self.rm._sync_var(op.getarg(vable_index)) + vable = self.fm.loc(op.getarg(vable_index)) else: vable = imm(0) self._call(op, [imm(size), vable] + - [self.loc(arg) for arg in op.args], + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) + args = op.sliceargs(0, op.numargs()) + loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args) # ^^^ we force loc_newvalue in a reg (unless it's a Const), # because it will be needed anyway by the following setfield_gc. # It avoids loading it twice from the memory. 
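# The recurring pattern in this refactoring: the operation's argument list is
# fetched once and passed as the list of "forbidden" variables, so that
# picking a register for one argument never spills another argument that the
# same operation still needs.  The toy register manager below only
# illustrates that idea; it is not the real RegisterManager.
class ToyRegisterManager(object):
    def __init__(self, free_regs):
        self.free_regs = list(free_regs)
        self.reg_of = {}
    def make_sure_var_in_reg(self, var, forbidden_vars=[]):
        if var in self.reg_of:
            return self.reg_of[var]
        if self.free_regs:
            reg = self.free_regs.pop()
        else:
            # spill some bound variable, but never one of the forbidden ones
            victim = [v for v in self.reg_of if v not in forbidden_vars][0]
            reg = self.reg_of.pop(victim)
        self.reg_of[var] = reg
        return reg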
- loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), op.args, + loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) arglocs = [loc_base, loc_newvalue] # add eax, ecx and edx as extra "arguments" to ensure they are @@ -701,7 +714,7 @@ and self.rm.stays_alive(v)): arglocs.append(reg) self.PerformDiscard(op, arglocs) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) @@ -836,35 +849,38 @@ need_lower_byte = True else: need_lower_byte = False - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - value_loc = self.make_sure_var_in_reg(op.getarg(1), op.args, + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + value_loc = self.make_sure_var_in_reg(op.getarg(1), args, need_lower_byte=need_lower_byte) - self.possibly_free_vars(op.args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc]) consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) - value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), op.args, + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=True) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc]) consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: need_lower_byte = True else: need_lower_byte = False - value_loc = self.make_sure_var_in_reg(op.getarg(2), op.args, + value_loc = self.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=need_lower_byte) - ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) - self.possibly_free_vars(op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc, imm(scale), imm(ofs)]) @@ -872,8 +888,9 @@ def consider_getfield_gc(self, op): ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars(args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc) @@ -883,9 +900,10 @@ def consider_getarrayitem_gc(self, op): scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc) @@ 
-912,8 +930,9 @@ #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc], result_loc) @@ -923,15 +942,17 @@ arraydescr = op.descr assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), op.args) - self.rm.possibly_free_vars(op.args) + args = op.sliceargs(0, op.numargs()) + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc], result_loc) @@ -952,17 +973,20 @@ xmmtmp = X86XMMRegisterManager.all_regs[0] xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT] + # XXX we don't need a copy, we only just the original list + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] assert tmploc not in nonfloatlocs dst_locations = [loc for loc in nonfloatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, tmploc) # Part about floats - src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT] + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type == FLOAT] dst_locations = [loc for loc in floatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp) self.rm.possibly_free_var(box) self.xrm.possibly_free_var(box1) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) def consider_debug_merge_point(self, op): From cfbolz at codespeak.net Thu Sep 16 17:35:22 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 16 Sep 2010 17:35:22 +0200 (CEST) Subject: [pypy-svn] r77122 - pypy/branch/better-map-instances/pypy/interpreter Message-ID: <20100916153522.64111282C23@codespeak.net> Author: cfbolz Date: Thu Sep 16 17:35:20 2010 New Revision: 77122 Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py Log: grumble Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Thu Sep 16 17:35:20 2010 @@ -193,7 +193,8 @@ def add(Proto): for key, value in Proto.__dict__.items(): - if not key.startswith('__') or key in ['__del__', '_mixin_']: + if (not key.startswith('__') and not key.startswith('_mixin_') + or key == '__del__'): if hasattr(value, 
"func_name"): value = func_with_new_name(value, value.func_name) body[key] = value From antocuni at codespeak.net Thu Sep 16 17:38:24 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 16 Sep 2010 17:38:24 +0200 (CEST) Subject: [pypy-svn] r77123 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/test backend/x86 metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100916153824.E5D7B282C23@codespeak.net> Author: antocuni Date: Thu Sep 16 17:38:23 2010 New Revision: 77123 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py Log: (david, antocuni): replace sliceargs with getarglist, so that in the future operations like CALL or JUMP will be able to implement it more efficiently Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py Thu Sep 16 17:38:23 2010 @@ -86,7 +86,7 @@ def process_operation(self, s, op, names, subops): args = [] - for v in op.sliceargs(0, op.numargs()): + for v in op.getarglist(): if v in names: args.append(names[v]) ## elif isinstance(v, ConstAddr): @@ -129,7 +129,7 @@ def print_loop_prebuilt(ops): for op in ops: - for arg in op.sliceargs(0, op.numargs()): + for arg in op.getarglist(): if isinstance(arg, ConstPtr): if arg not in names: writevar(arg, 'const_ptr') @@ -553,7 +553,7 @@ endvars = [] used_later = {} for op in loop.operations: - for v in op.sliceargs(0, op.numargs()): + for v in op.getarglist(): used_later[v] = True for v in startvars: if v not in used_later: @@ -581,7 +581,7 @@ return self.should_fail_by.fail_args else: assert self.should_fail_by.opnum == rop.FINISH - return self.should_fail_by.sliceargs(0, self.should_fail_by.numargs()) + return self.should_fail_by.getarglist() def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Thu Sep 16 17:38:23 2010 @@ -462,7 +462,7 @@ def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used @@ -494,7 +494,7 @@ def _consider_binop_part(self, 
op): x = op.getarg(0) argloc = self.loc(op.getarg(1)) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc = self.rm.force_result_in_reg(op.result, x, args) self.rm.possibly_free_var(op.getarg(1)) return loc, argloc @@ -529,7 +529,7 @@ loc2 = self.rm.convert_to_imm(op.getarg(1)) else: loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc1, loc2], loc1) self.rm.possibly_free_vars_for_op(op) @@ -570,7 +570,7 @@ pass else: arglocs[0] = self.rm.make_sure_var_in_reg(vx) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() self.rm.possibly_free_vars(args) if guard_op is None: loc = self.rm.force_allocate_reg(op.result, args, @@ -594,7 +594,7 @@ def _consider_float_op(self, op): loc1 = self.xrm.loc(op.getarg(1)) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc0, loc1], loc0) self.xrm.possibly_free_vars_for_op(op) @@ -605,7 +605,7 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) loc1 = self.xrm.loc(op.getarg(1)) @@ -695,7 +695,7 @@ def consider_cond_call_gc_wb(self, op): assert op.result is None - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args) # ^^^ we force loc_newvalue in a reg (unless it's a Const), # because it will be needed anyway by the following setfield_gc. @@ -849,7 +849,7 @@ need_lower_byte = True else: need_lower_byte = False - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) value_loc = self.make_sure_var_in_reg(op.getarg(1), args, need_lower_byte=need_lower_byte) @@ -859,7 +859,7 @@ consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, @@ -871,7 +871,7 @@ def consider_setarrayitem_gc(self, op): scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: need_lower_byte = True @@ -888,7 +888,7 @@ def consider_getfield_gc(self, op): ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) result_loc = self.force_allocate_reg(op.result) @@ -900,7 +900,7 @@ def consider_getarrayitem_gc(self, op): scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.rm.possibly_free_vars_for_op(op) @@ -930,7 +930,7 @@ #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) 
result_loc = self.rm.force_allocate_reg(op.result) @@ -942,14 +942,14 @@ arraydescr = op.descr assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.rm.possibly_free_vars_for_op(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Thu Sep 16 17:38:23 2010 @@ -546,7 +546,7 @@ # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. a FINISH descr = target_loop_token.finishdescr - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py Thu Sep 16 17:38:23 2010 @@ -191,7 +191,7 @@ def getlinks(self): boxes = {} for op in self.all_operations: - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() args.append(op.result) for box in args: if getattr(box, 'is_box', False): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Thu Sep 16 17:38:23 2010 @@ -794,7 +794,7 @@ # RPython-friendly print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: - args = op.sliceargs(0, op.numargs()) + args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ self._dump_box(op.result) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py Thu Sep 16 17:38:23 2010 @@ -167,7 +167,7 @@ # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" opnum = prevop.opnum - lastop_args = lastop.sliceargs(0, lastop.numargs()) + lastop_args = lastop.getarglist() if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) and prevop.result not in lastop_args): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py 
(original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 16 17:38:23 2010 @@ -137,7 +137,7 @@ self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL - args = op.sliceargs(1, op.numargs()) + args = op.getarglist()[1:] self.emit_operation(ResOperation(rop.CALL, args, op.result, op.descr)) def optimize_guard(self, op, constbox, emit_operation=True): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 16 17:38:23 2010 @@ -441,7 +441,7 @@ return # 0-length arraycopy descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - args = op.sliceargs(1, op.numargs()) + args = op.getarglist()[1:] self.emit_operation(ResOperation(rop.CALL, args, op.result, descr)) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Thu Sep 16 17:38:23 2010 @@ -2112,8 +2112,9 @@ op = self.history.operations.pop() assert op.opnum == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - greenargs = op.sliceargs(1, num_green_args+1) - args = op.sliceargs(num_green_args+1, op.numargs()) + arglist = op.getarglist() + greenargs = arglist[1:num_green_args+1] + args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Thu Sep 16 17:38:23 2010 @@ -39,8 +39,8 @@ def setarglist(self, args): self._args = args - def sliceargs(self, start, stop): - return [self.getarg(i) for i in range(start, stop)] + def getarglist(self): + return self._args def setdescr(self, descr): # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Thu Sep 16 17:38:23 2010 @@ -14,10 +14,10 @@ if op.opnum == rop.ARRAYCOPY: descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - args = op.sliceargs(1, op.numargs()) + args = op.getarglist()[1:] op = ResOperation(rop.CALL, args, op.result, descr=descr) elif op.opnum == rop.CALL_PURE: - args = op.sliceargs(1, op.numargs()) + args = op.getarglist()[1:] op = ResOperation(rop.CALL, args, op.result, op.descr) elif op.opnum == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py ============================================================================== --- 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py Thu Sep 16 17:38:23 2010 @@ -26,7 +26,7 @@ def opboxes(operations): for op in operations: yield op.result - for box in op.sliceargs(0, op.numargs()): + for box in op.getarglist(): yield box def allboxes(): for box in self.inputargs: From arigo at codespeak.net Thu Sep 16 17:48:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 17:48:00 +0200 (CEST) Subject: [pypy-svn] r77124 - pypy/branch/gen2-gc/pypy/rpython/memory/test Message-ID: <20100916154800.4E0CC282C23@codespeak.net> Author: arigo Date: Thu Sep 16 17:47:58 2010 New Revision: 77124 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Log: Fix issues by not having an empty GcStruct. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Thu Sep 16 17:47:58 2010 @@ -568,7 +568,7 @@ assert res == 111 def test_writebarrier_before_copy(self): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -646,9 +646,6 @@ def test_finalizer_order(self): py.test.skip("Not implemented yet") - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass From arigo at codespeak.net Thu Sep 16 18:52:47 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 16 Sep 2010 18:52:47 +0200 (CEST) Subject: [pypy-svn] r77125 - pypy/branch/gen2-gc/pypy/rpython/memory/gc Message-ID: <20100916165247.BE84F282BE3@codespeak.net> Author: arigo Date: Thu Sep 16 18:52:46 2010 New Revision: 77125 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: Add "official" debug_prints, and remove debug_check_consistency() after all nursery collects. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Thu Sep 16 18:52:46 2010 @@ -5,7 +5,7 @@ from pypy.rpython.memory.gc import minimarkpage from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint -from pypy.rlib.debug import ll_assert, debug_print +from pypy.rlib.debug import ll_assert, debug_print, debug_start, debug_stop from pypy.rlib.objectmodel import we_are_translated WORD = LONG_BIT // 8 @@ -292,9 +292,6 @@ # we need to fix it with another call to minor_collection(). if self.nursery_next + totalsize > self.nursery_top: self.minor_collection() - # - else: - debug_print('minor collection') # result = self.nursery_next self.nursery_next = result + totalsize @@ -593,6 +590,8 @@ """Perform a minor collection: find the objects from the nursery that remain alive and move them out.""" # + debug_start("gc-minor") + # # First, find the roots that point to nursery objects. These # nursery objects are copied out of the nursery. 
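# Aside on the logging calls added in this revision: debug_start() and
# debug_stop() open and close a named, timed log section, and debug_print()
# adds a line to the log; in a translated interpreter the output is
# typically enabled through the PYPYLOG environment variable.  Minimal usage
# sketch (the wrapper function itself is hypothetical):
from pypy.rlib.debug import debug_start, debug_stop, debug_print

def log_minor_collection(gc):
    debug_start("gc-minor")
    debug_print("minor collect, total memory used:",
                gc.get_total_memory_used())
    debug_stop("gc-minor")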
Note that # references to further nursery objects are not modified by @@ -621,7 +620,10 @@ llarena.arena_reset(self.nursery, self.nursery_size, 2) self.nursery_next = self.nursery # - if not we_are_translated(): + debug_print("minor collect, total memory used:", + self.get_total_memory_used()) + debug_stop("gc-minor") + if 0: # not we_are_translated(): self.debug_check_consistency() # xxx expensive! @@ -728,7 +730,14 @@ def major_collection(self): """Do a major collection. Only for when the nursery is empty.""" # - debug_print('major collection:', self.get_total_memory_used()) + debug_start("gc-collect") + debug_print() + debug_print(".----------- Full collection ------------------") + debug_print("| used before collection:") + debug_print("| in ArenaCollection: ", + self.ac.total_memory_used, "bytes") + debug_print("| raw_malloced: ", + self.rawmalloced_total_size, "bytes") # # Debugging checks ll_assert(self.nursery_next == self.nursery, @@ -770,10 +779,17 @@ # self.debug_check_consistency() # - debug_print(' ->', self.get_total_memory_used()) self.next_major_collection_threshold = ( self.get_total_memory_used() * self.major_collection_threshold) # + debug_print("| used after collection:") + debug_print("| in ArenaCollection: ", + self.ac.total_memory_used, "bytes") + debug_print("| raw_malloced: ", + self.rawmalloced_total_size, "bytes") + debug_print("`----------------------------------------------") + debug_stop("gc-collect") + # # At the end, we can execute the finalizers of the objects # listed in 'run_finalizers'. Note that this will typically do # more allocations. From hakanardo at codespeak.net Thu Sep 16 19:25:46 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 16 Sep 2010 19:25:46 +0200 (CEST) Subject: [pypy-svn] r77126 - pypy/trunk/pypy/module/__builtin__ Message-ID: <20100916172546.273AB282BE3@codespeak.net> Author: hakanardo Date: Thu Sep 16 19:25:44 2010 New Revision: 77126 Modified: pypy/trunk/pypy/module/__builtin__/functional.py Log: only unroll the 2 arguments case Modified: pypy/trunk/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/functional.py (original) +++ pypy/trunk/pypy/module/__builtin__/functional.py Thu Sep 16 19:25:44 2010 @@ -106,7 +106,7 @@ compare = space.lt args_w = args.arguments_w - if len(args_w) > 1 and len(args_w) < 10 and not args.keywords: + if len(args_w) == 2 and not args.keywords: # Unrollable case w_max_item = None for w_item in args_w: From fijal at codespeak.net Thu Sep 16 23:15:28 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 16 Sep 2010 23:15:28 +0200 (CEST) Subject: [pypy-svn] r77127 - pypy/branch/rsocket-improvements/pypy/rlib Message-ID: <20100916211528.283D0282BE3@codespeak.net> Author: fijal Date: Thu Sep 16 23:15:21 2010 New Revision: 77127 Modified: pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py Log: Make netpacket includes conditional. 
Should fix OS X issues Modified: pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/rsocket-improvements/pypy/rlib/_rsocket_rffi.py Thu Sep 16 23:15:21 2010 @@ -32,11 +32,13 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', - 'netpacket/packet.h', - 'sys/ioctl.h', - 'net/if.h', ) - cond_includes = [('AF_NETLINK', 'linux/netlink.h')] + + cond_includes = [('AF_NETLINK', 'linux/netlink.h'), + ('AF_PACKET', 'netpacket/packet.h'), + ('AF_PACKET', 'sys/ioctl.h'), + ('AF_PACKET', 'net/if.h')] + libraries = () calling_conv = 'c' HEADER = ''.join(['#include <%s>\n' % filename for filename in includes]) From fijal at codespeak.net Thu Sep 16 23:26:14 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 16 Sep 2010 23:26:14 +0200 (CEST) Subject: [pypy-svn] r77128 - in pypy/branch/rsocket-improvements/pypy/module/select: . test Message-ID: <20100916212614.C7C34282BE3@codespeak.net> Author: fijal Date: Thu Sep 16 23:26:13 2010 New Revision: 77128 Modified: pypy/branch/rsocket-improvements/pypy/module/select/interp_select.py pypy/branch/rsocket-improvements/pypy/module/select/test/test_select.py Log: A test and a "fix". It's debatable whether it's a bug or not, but someone complained. Modified: pypy/branch/rsocket-improvements/pypy/module/select/interp_select.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/select/interp_select.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/select/interp_select.py Thu Sep 16 23:26:13 2010 @@ -54,14 +54,11 @@ if space.is_w(w_timeout, space.w_None): timeout = -1 else: - # rationale for computing directly integer, instead - # of float + math.cell is that - # we have for free overflow check and noone really - # cares (since CPython does not try too hard to have - # a ceiling of value) + # we want to be compatible with cpython and also accept things + # that can be casted to integer (I think) try: # compute the integer - timeout = space.int_w(w_timeout) + timeout = space.int_w(space.int(w_timeout)) except (OverflowError, ValueError): raise OperationError(space.w_ValueError, space.wrap("math range error")) Modified: pypy/branch/rsocket-improvements/pypy/module/select/test/test_select.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/module/select/test/test_select.py (original) +++ pypy/branch/rsocket-improvements/pypy/module/select/test/test_select.py Thu Sep 16 23:26:13 2010 @@ -210,6 +210,14 @@ assert len(res[2]) == 0 assert res[0][0] == res[1][0] + def test_poll(self): + import select + class A(object): + def __int__(self): + return 3 + + select.poll().poll(A()) # assert did not crash + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" def setup_class(cls): @@ -275,4 +283,3 @@ s1, addr2 = cls.sock.accept() return s1, s2 - From cfbolz at codespeak.net Fri Sep 17 10:47:31 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 17 Sep 2010 10:47:31 +0200 (CEST) Subject: [pypy-svn] r77129 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100917084731.2F894282BD4@codespeak.net> Author: cfbolz Date: Fri Sep 17 10:47:28 2010 New Revision: 77129 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: remove debug print Modified: 
pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Fri Sep 17 10:47:28 2010 @@ -612,8 +612,6 @@ def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): space = pycode.space w_name = pycode.co_names_w[nameindex] - if space.str_w(w_name) == "task_holding": - print map if map is not None: w_type = map.w_cls w_descr = w_type.getattribute_if_not_from_object() From antocuni at codespeak.net Fri Sep 17 13:20:04 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 13:20:04 +0200 (CEST) Subject: [pypy-svn] r77130 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . optimizeopt Message-ID: <20100917112004.A0B4A282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 13:20:02 2010 New Revision: 77130 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Log: (david, antocuni) - make ResOperation a factory function: in the future, it will select the right class to instantiate - add a copy_and_change method to "replace" the op with a new one. Start doing it in few places Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 17 13:20:02 2010 @@ -331,7 +331,7 @@ self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.store_final_boxes_in_guard(op) + op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True elif op.returns_bool_result(): @@ -361,12 +361,13 @@ opnum = rop.GUARD_TRUE else: raise AssertionError("uh?") - op.opnum = opnum - # XXX XXX: fix me when the refactoring is complete - op._args = [op.getarg(0)] + newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) + newop.fail_args = op.getfailargs() + return newop else: # a real GUARD_VALUE. Make it use one counter per value. descr.make_a_counter_per_value(op) + return op def make_args_key(self, op): args = [] Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Fri Sep 17 13:20:02 2010 @@ -178,17 +178,16 @@ # guard_nonnull_class on this value, which is rather silly. 
# replace the original guard with a guard_value old_guard_op = self.optimizer.newoperations[value.last_guard_index] - old_opnum = old_guard_op.opnum - old_guard_op.opnum = rop.GUARD_VALUE - # XXX XXX: implement it when the refactoring is complete - old_guard_op._args = [old_guard_op.getarg(0), op.getarg(1)] + new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.descr assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(old_guard_op) + descr.make_a_counter_per_value(new_guard_op) emit_operation = False constbox = op.getarg(1) assert isinstance(constbox, Const) @@ -219,13 +218,13 @@ if old_guard_op.opnum == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. - old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - # XXX XXX: implement it when the refactoring is complete - old_guard_op._args = [old_guard_op.getarg(0), op.getarg(1)] + new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.descr so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.descr assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS emit_operation = False Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 17 13:20:02 2010 @@ -324,7 +324,7 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op._args, None, + op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None, descr = vrefinfo.descr_forced) self.optimize_SETFIELD_GC(op1) # - set 'virtual_token' to TOKEN_NONE Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Fri Sep 17 13:20:02 2010 @@ -1,7 +1,10 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized -class ResOperation(object): +def ResOperation(opnum, args, result, descr=None): + return BaseResOperation(opnum, args, result, descr) + +class BaseResOperation(object): """The central ResOperation class, representing one operation.""" # for 'guard_*' @@ -27,6 +30,19 @@ import pdb;pdb.set_trace() object.__setattr__(self, attr, value) + def copy_and_change(self, opnum, args=None, result=None, descr=None): + "shallow copy: the returned operation is meant to 
be used in place of self" + if args is None: + args = self.getarglist() + if result is None: + result = self.result + if descr is None: + descr = self.getdescr() + newop = ResOperation(opnum, args, result, descr) + #if isinstance(self, GuardOperation) + newop.setfailargs(self.getfailargs()) + return newop + def getarg(self, i): return self._args[i] @@ -42,6 +58,15 @@ def getarglist(self): return self._args + def getfailargs(self): + return self.fail_args + + def setfailargs(self, fail_args): + self.fail_args = fail_args + + def getdescr(self): + return self.descr + def setdescr(self, descr): # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt # instance provided by the backend holding details about the type From cfbolz at codespeak.net Fri Sep 17 13:34:09 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 17 Sep 2010 13:34:09 +0200 (CEST) Subject: [pypy-svn] r77131 - pypy/extradoc/talk/pepm2011 Message-ID: <20100917113409.8E4A7282B9E@codespeak.net> Author: cfbolz Date: Fri Sep 17 13:34:08 2010 New Revision: 77131 Added: pypy/extradoc/talk/pepm2011/ pypy/extradoc/talk/pepm2011/Makefile pypy/extradoc/talk/pepm2011/paper.bib (contents, props changed) pypy/extradoc/talk/pepm2011/paper.tex pypy/extradoc/talk/pepm2011/sigplanconf.cls Log: optimistically adding a directory for a paper for PEPM 2011 Added: pypy/extradoc/talk/pepm2011/Makefile ============================================================================== --- (empty file) +++ pypy/extradoc/talk/pepm2011/Makefile Fri Sep 17 13:34:08 2010 @@ -0,0 +1,23 @@ + +escape-tracing.pdf: paper.tex paper.bib + pdflatex paper + bibtex paper + pdflatex paper + pdflatex paper + mv paper.pdf escape-tracing.pdf + +view: escape-tracing.pdf + evince escape-tracing.pdf & + +xpdf: escape-tracing.pdf + xpdf escape-tracing.pdf & + + +%.png: %.dot + dot -Tpng $< > $@ + +%.eps: %.dot + dot -Tps $< > $@ + +%.pdf: %.eps + epstopdf $< Added: pypy/extradoc/talk/pepm2011/paper.bib ============================================================================== Binary file. No diff available. 
Added: pypy/extradoc/talk/pepm2011/paper.tex
==============================================================================
--- (empty file)
+++ pypy/extradoc/talk/pepm2011/paper.tex	Fri Sep 17 13:34:08 2010
@@ -0,0 +1,118 @@
+\documentclass{sigplanconf}
+
+\usepackage{ifthen}
+\usepackage{fancyvrb}
+\usepackage{color}
+\usepackage{ulem}
+\usepackage{xspace}
+\usepackage{epsfig}
+\usepackage{amssymb}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage[utf8]{inputenc}
+
+\newboolean{showcomments}
+\setboolean{showcomments}{false}
+\ifthenelse{\boolean{showcomments}}
+  {\newcommand{\nb}[2]{
+    \fbox{\bfseries\sffamily\scriptsize#1}
+    {\sf\small$\blacktriangleright$\textit{#2}$\blacktriangleleft$}
+   }
+   \newcommand{\version}{\emph{\scriptsize$-$Id: main.tex 19055 2008-06-05 11:20:31Z cfbolz $-$}}
+  }
+  {\newcommand{\nb}[2]{}
+   \newcommand{\version}{}
+  }
+
+\newcommand\cfbolz[1]{\nb{CFB}{#1}}
+\newcommand\arigo[1]{\nb{AR}{#1}}
+\newcommand\fijal[1]{\nb{FIJAL}{#1}}
+\newcommand\david[1]{\nb{DAVID}{#1}}
+\newcommand\reva[1]{\nb{Reviewer 1}{#1}}
+\newcommand\revb[1]{\nb{Reviewer 2}{#1}}
+\newcommand\revc[1]{\nb{Reviewer 3}{#1}}
+\newcommand{\commentout}[1]{}
+
+\newcommand\ie{i.e.,\xspace}
+\newcommand\eg{e.g.,\xspace}
+
+\normalem
+
+\let\oldcite=\cite
+
+\renewcommand\cite[1]{\ifthenelse{\equal{#1}{XXX}}{[citation~needed]}{\oldcite{#1}}}
+
+%
+\def\sharedaffiliation{%
+\end{tabular}
+\begin{tabular}{c}}
+%
+\begin{document}
+\conferenceinfo{PEPM'11,} {XXX}
+\CopyrightYear{XXX}
+\copyrightdata{XXX}
+
+\title{Escape Analysis and Specialization in a Tracing JIT}
+
+\authorinfo{Carl Friedrich Bolz \and Armin Rigo \and Antonio Cuni \and Maciek Fijałkowski}
+           {Heinrich-Heine-Universität Düsseldorf, STUPS Group, Germany}
+           {cfbolz at gmx.de}
+
+%\numberofauthors{3}
+%\author{
+%\alignauthor Carl Friedrich Bolz\\
+%       \email{cfbolz at gmx.de}
+%\alignauthor Michael Leuschel\\
+%       \email{leuschel at cs.uni-duesseldorf.de}
+%\alignauthor David Schneider\\
+%       \email{david.schneider at uni-duesseldorf.de}
+%  \sharedaffiliation
+%       \affaddr{Heinrich-Heine-Universität Düsseldorf, STUPS Group, Germany}\\
+%}
+
+\maketitle
+\begin{abstract}
+\footnote{This research is partially supported by the BMBF funded project PyJIT (nr. 01QE0913B;
+Eureka Eurostars).}
+\end{abstract}
+
+% A category with the (minimum) three required fields
+%\category{H.4}{Information Systems Applications}{Miscellaneous}
+%A category including the fourth, optional field follows...
+%\category{D.2.8}{Software Engineering}{Metrics}[complexity measures, performance measures] + +\category{D.3.4}{Programming Languages}{Processors}[code generation, +interpreters, run-time environments] + +\terms +Languages, Performance, Experimentation + +\keywords{XXX}% + +\section{Introduction} + +\section{Background} +\label{sec:Background} + +\subsection{PyPy} +\label{sub:PyPy} + +\subsection{Tracing JIT Compilers} +\label{sub:JIT_background} + + + +\section{Evaluation} +\label{sec:Evaluation} + + +\section{Related Work} +\label{sec:related} + +\section{Conclusions} +\label{sec:conclusions} + +\bibliographystyle{abbrv} +\bibliography{paper,michael} + +\end{document} Added: pypy/extradoc/talk/pepm2011/sigplanconf.cls ============================================================================== --- (empty file) +++ pypy/extradoc/talk/pepm2011/sigplanconf.cls Fri Sep 17 13:34:08 2010 @@ -0,0 +1,1250 @@ +%----------------------------------------------------------------------------- +% +% LaTeX Class/Style File +% +% Name: sigplanconf.cls +% Purpose: A LaTeX 2e class file for SIGPLAN conference proceedings. +% This class file supercedes acm_proc_article-sp, +% sig-alternate, and sigplan-proc. +% +% Author: Paul C. Anagnostopoulos +% Windfall Software +% 978 371-2316 +% paul at windfall.com +% +% Created: 12 September 2004 +% +% Revisions: See end of file. +% +%----------------------------------------------------------------------------- + + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{sigplanconf}[2009/09/30 v2.3 ACM SIGPLAN Proceedings] + +% The following few pages contain LaTeX programming extensions adapted +% from the ZzTeX macro package. + +% Token Hackery +% ----- ------- + + +\def \@expandaftertwice {\expandafter\expandafter\expandafter} +\def \@expandafterthrice {\expandafter\expandafter\expandafter\expandafter + \expandafter\expandafter\expandafter} + +% This macro discards the next token. + +\def \@discardtok #1{}% token + +% This macro removes the `pt' following a dimension. + +{\catcode `\p = 12 \catcode `\t = 12 + +\gdef \@remover #1pt{#1} + +} % \catcode + +% This macro extracts the contents of a macro and returns it as plain text. +% Usage: \expandafter\@defof \meaning\macro\@mark + +\def \@defof #1:->#2\@mark{#2} + +% Control Sequence Names +% ------- -------- ----- + + +\def \@name #1{% {\tokens} + \csname \expandafter\@discardtok \string#1\endcsname} + +\def \@withname #1#2{% {\command}{\tokens} + \expandafter#1\csname \expandafter\@discardtok \string#2\endcsname} + +% Flags (Booleans) +% ----- ---------- + +% The boolean literals \@true and \@false are appropriate for use with +% the \if command, which tests the codes of the next two characters. + +\def \@true {TT} +\def \@false {FL} + +\def \@setflag #1=#2{\edef #1{#2}}% \flag = boolean + +% IF and Predicates +% -- --- ---------- + +% A "predicate" is a macro that returns \@true or \@false as its value. +% Such values are suitable for use with the \if conditional. For example: +% +% \if \@oddp{\x} \else \fi + +% A predicate can be used with \@setflag as follows: +% +% \@setflag \flag = {} + +% Here are the predicates for TeX's repertoire of conditional +% commands. These might be more appropriately interspersed with +% other definitions in this module, but what the heck. +% Some additional "obvious" predicates are defined. 
+ +\def \@eqlp #1#2{\ifnum #1 = #2\@true \else \@false \fi} +\def \@neqlp #1#2{\ifnum #1 = #2\@false \else \@true \fi} +\def \@lssp #1#2{\ifnum #1 < #2\@true \else \@false \fi} +\def \@gtrp #1#2{\ifnum #1 > #2\@true \else \@false \fi} +\def \@zerop #1{\ifnum #1 = 0\@true \else \@false \fi} +\def \@onep #1{\ifnum #1 = 1\@true \else \@false \fi} +\def \@posp #1{\ifnum #1 > 0\@true \else \@false \fi} +\def \@negp #1{\ifnum #1 < 0\@true \else \@false \fi} +\def \@oddp #1{\ifodd #1\@true \else \@false \fi} +\def \@evenp #1{\ifodd #1\@false \else \@true \fi} +\def \@rangep #1#2#3{\if \@orp{\@lssp{#1}{#2}}{\@gtrp{#1}{#3}}\@false \else + \@true \fi} +\def \@tensp #1{\@rangep{#1}{10}{19}} + +\def \@dimeqlp #1#2{\ifdim #1 = #2\@true \else \@false \fi} +\def \@dimneqlp #1#2{\ifdim #1 = #2\@false \else \@true \fi} +\def \@dimlssp #1#2{\ifdim #1 < #2\@true \else \@false \fi} +\def \@dimgtrp #1#2{\ifdim #1 > #2\@true \else \@false \fi} +\def \@dimzerop #1{\ifdim #1 = 0pt\@true \else \@false \fi} +\def \@dimposp #1{\ifdim #1 > 0pt\@true \else \@false \fi} +\def \@dimnegp #1{\ifdim #1 < 0pt\@true \else \@false \fi} + +\def \@vmodep {\ifvmode \@true \else \@false \fi} +\def \@hmodep {\ifhmode \@true \else \@false \fi} +\def \@mathmodep {\ifmmode \@true \else \@false \fi} +\def \@textmodep {\ifmmode \@false \else \@true \fi} +\def \@innermodep {\ifinner \@true \else \@false \fi} + +\long\def \@codeeqlp #1#2{\if #1#2\@true \else \@false \fi} + +\long\def \@cateqlp #1#2{\ifcat #1#2\@true \else \@false \fi} + +\long\def \@tokeqlp #1#2{\ifx #1#2\@true \else \@false \fi} +\long\def \@xtokeqlp #1#2{\expandafter\ifx #1#2\@true \else \@false \fi} + +\long\def \@definedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@false \else \@true \fi} + +\long\def \@undefinedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@true \else \@false \fi} + +\def \@emptydefp #1{\ifx #1\@empty \@true \else \@false \fi}% {\name} + +\let \@emptylistp = \@emptydefp + +\long\def \@emptyargp #1{% {#n} + \@empargp #1\@empargq\@mark} +\long\def \@empargp #1#2\@mark{% + \ifx #1\@empargq \@true \else \@false \fi} +\def \@empargq {\@empargq} + +\def \@emptytoksp #1{% {\tokenreg} + \expandafter\@emptoksp \the#1\@mark} + +\long\def \@emptoksp #1\@mark{\@emptyargp{#1}} + +\def \@voidboxp #1{\ifvoid #1\@true \else \@false \fi} +\def \@hboxp #1{\ifhbox #1\@true \else \@false \fi} +\def \@vboxp #1{\ifvbox #1\@true \else \@false \fi} + +\def \@eofp #1{\ifeof #1\@true \else \@false \fi} + + +% Flags can also be used as predicates, as in: +% +% \if \flaga \else \fi + + +% Now here we have predicates for the common logical operators. 
+ +\def \@notp #1{\if #1\@false \else \@true \fi} + +\def \@andp #1#2{\if #1% + \if #2\@true \else \@false \fi + \else + \@false + \fi} + +\def \@orp #1#2{\if #1% + \@true + \else + \if #2\@true \else \@false \fi + \fi} + +\def \@xorp #1#2{\if #1% + \if #2\@false \else \@true \fi + \else + \if #2\@true \else \@false \fi + \fi} + +% Arithmetic +% ---------- + +\def \@increment #1{\advance #1 by 1\relax}% {\count} + +\def \@decrement #1{\advance #1 by -1\relax}% {\count} + +% Options +% ------- + + +\@setflag \@authoryear = \@false +\@setflag \@blockstyle = \@false +\@setflag \@copyrightwanted = \@true +\@setflag \@explicitsize = \@false +\@setflag \@mathtime = \@false +\@setflag \@natbib = \@true +\@setflag \@ninepoint = \@true +\newcount{\@numheaddepth} \@numheaddepth = 3 +\@setflag \@onecolumn = \@false +\@setflag \@preprint = \@false +\@setflag \@reprint = \@false +\@setflag \@tenpoint = \@false +\@setflag \@times = \@false + +% Note that all the dangerous article class options are trapped. + +\DeclareOption{9pt}{\@setflag \@ninepoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{10pt}{\PassOptionsToClass{10pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@tenpoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{11pt}{\PassOptionsToClass{11pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@explicitsize = \@true} + +\DeclareOption{12pt}{\@unsupportedoption{12pt}} + +\DeclareOption{a4paper}{\@unsupportedoption{a4paper}} + +\DeclareOption{a5paper}{\@unsupportedoption{a5paper}} + +\DeclareOption{authoryear}{\@setflag \@authoryear = \@true} + +\DeclareOption{b5paper}{\@unsupportedoption{b5paper}} + +\DeclareOption{blockstyle}{\@setflag \@blockstyle = \@true} + +\DeclareOption{cm}{\@setflag \@times = \@false} + +\DeclareOption{computermodern}{\@setflag \@times = \@false} + +\DeclareOption{executivepaper}{\@unsupportedoption{executivepaper}} + +\DeclareOption{indentedstyle}{\@setflag \@blockstyle = \@false} + +\DeclareOption{landscape}{\@unsupportedoption{landscape}} + +\DeclareOption{legalpaper}{\@unsupportedoption{legalpaper}} + +\DeclareOption{letterpaper}{\@unsupportedoption{letterpaper}} + +\DeclareOption{mathtime}{\@setflag \@mathtime = \@true} + +\DeclareOption{natbib}{\@setflag \@natbib = \@true} + +\DeclareOption{nonatbib}{\@setflag \@natbib = \@false} + +\DeclareOption{nocopyrightspace}{\@setflag \@copyrightwanted = \@false} + +\DeclareOption{notitlepage}{\@unsupportedoption{notitlepage}} + +\DeclareOption{numberedpars}{\@numheaddepth = 4} + +\DeclareOption{numbers}{\@setflag \@authoryear = \@false} + +%%%\DeclareOption{onecolumn}{\@setflag \@onecolumn = \@true} + +\DeclareOption{preprint}{\@setflag \@preprint = \@true} + +\DeclareOption{reprint}{\@setflag \@reprint = \@true} + +\DeclareOption{times}{\@setflag \@times = \@true} + +\DeclareOption{titlepage}{\@unsupportedoption{titlepage}} + +\DeclareOption{twocolumn}{\@setflag \@onecolumn = \@false} + +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} + +\ExecuteOptions{9pt,indentedstyle,times} +\@setflag \@explicitsize = \@false +\ProcessOptions + +\if \@onecolumn + \if \@notp{\@explicitsize}% + \@setflag \@ninepoint = \@false + \PassOptionsToClass{11pt}{article}% + \fi + \PassOptionsToClass{twoside,onecolumn}{article} +\else + \PassOptionsToClass{twoside,twocolumn}{article} +\fi +\LoadClass{article} + +\def \@unsupportedoption #1{% + \ClassError{proc}{The standard '#1' option is not supported.}} + +% This can be used with the 'reprint' option to get the final 
folios. + +\def \setpagenumber #1{% + \setcounter{page}{#1}} + +\AtEndDocument{\label{sigplanconf at finalpage}} + +% Utilities +% --------- + + +\newcommand{\setvspace}[2]{% + #1 = #2 + \advance #1 by -1\parskip} + +% Document Parameters +% -------- ---------- + + +% Page: + +\setlength{\hoffset}{-1in} +\setlength{\voffset}{-1in} + +\setlength{\topmargin}{1in} +\setlength{\headheight}{0pt} +\setlength{\headsep}{0pt} + +\if \@onecolumn + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\else + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\fi + +% Text area: + +\newdimen{\standardtextwidth} +\setlength{\standardtextwidth}{42pc} + +\if \@onecolumn + \setlength{\textwidth}{40.5pc} +\else + \setlength{\textwidth}{\standardtextwidth} +\fi + +\setlength{\topskip}{8pt} +\setlength{\columnsep}{2pc} +\setlength{\textheight}{54.5pc} + +% Running foot: + +\setlength{\footskip}{30pt} + +% Paragraphs: + +\if \@blockstyle + \setlength{\parskip}{5pt plus .1pt minus .5pt} + \setlength{\parindent}{0pt} +\else + \setlength{\parskip}{0pt} + \setlength{\parindent}{12pt} +\fi + +\setlength{\lineskip}{.5pt} +\setlength{\lineskiplimit}{\lineskip} + +\frenchspacing +\pretolerance = 400 +\tolerance = \pretolerance +\setlength{\emergencystretch}{5pt} +\clubpenalty = 10000 +\widowpenalty = 10000 +\setlength{\hfuzz}{.5pt} + +% Standard vertical spaces: + +\newskip{\standardvspace} +\setvspace{\standardvspace}{5pt plus 1pt minus .5pt} + +% Margin paragraphs: + +\setlength{\marginparwidth}{36pt} +\setlength{\marginparsep}{2pt} +\setlength{\marginparpush}{8pt} + + +\setlength{\skip\footins}{8pt plus 3pt minus 1pt} +\setlength{\footnotesep}{9pt} + +\renewcommand{\footnoterule}{% + \hrule width .5\columnwidth height .33pt depth 0pt} + +\renewcommand{\@makefntext}[1]{% + \noindent \@makefnmark \hspace{1pt}#1} + +% Floats: + +\setcounter{topnumber}{4} +\setcounter{bottomnumber}{1} +\setcounter{totalnumber}{4} + +\renewcommand{\fps at figure}{tp} +\renewcommand{\fps at table}{tp} +\renewcommand{\topfraction}{0.90} +\renewcommand{\bottomfraction}{0.30} +\renewcommand{\textfraction}{0.10} +\renewcommand{\floatpagefraction}{0.75} + +\setcounter{dbltopnumber}{4} + +\renewcommand{\dbltopfraction}{\topfraction} +\renewcommand{\dblfloatpagefraction}{\floatpagefraction} + +\setlength{\floatsep}{18pt plus 4pt minus 2pt} +\setlength{\textfloatsep}{18pt plus 4pt minus 3pt} +\setlength{\intextsep}{10pt plus 4pt minus 3pt} + +\setlength{\dblfloatsep}{18pt plus 4pt minus 2pt} +\setlength{\dbltextfloatsep}{20pt plus 4pt minus 3pt} + +% Miscellaneous: + +\errorcontextlines = 5 + +% Fonts +% ----- + + +\if \@times + \renewcommand{\rmdefault}{ptm}% + \if \@mathtime + \usepackage[mtbold,noTS1]{mathtime}% + \else +%%% \usepackage{mathptm}% + \fi +\else + \relax +\fi + +\if \@ninepoint + +\renewcommand{\normalsize}{% + \@setfontsize{\normalsize}{9pt}{10pt}% + \setlength{\abovedisplayskip}{5pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{3pt plus 1pt minus 2pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\tiny}{\@setfontsize{\tiny}{5pt}{6pt}} + +\renewcommand{\scriptsize}{\@setfontsize{\scriptsize}{7pt}{8pt}} + +\renewcommand{\small}{% + \@setfontsize{\small}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus 1pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + 
+\renewcommand{\footnotesize}{% + \@setfontsize{\footnotesize}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\large}{\@setfontsize{\large}{11pt}{13pt}} + +\renewcommand{\Large}{\@setfontsize{\Large}{14pt}{18pt}} + +\renewcommand{\LARGE}{\@setfontsize{\LARGE}{18pt}{20pt}} + +\renewcommand{\huge}{\@setfontsize{\huge}{20pt}{25pt}} + +\renewcommand{\Huge}{\@setfontsize{\Huge}{25pt}{30pt}} + +\else\if \@tenpoint + +\relax + +\else + +\relax + +\fi\fi + +% Abstract +% -------- + + +\renewenvironment{abstract}{% + \section*{Abstract}% + \normalsize}{% + } + +% Bibliography +% ------------ + + +\renewenvironment{thebibliography}[1] + {\section*{\refname + \@mkboth{\MakeUppercase\refname}{\MakeUppercase\refname}}% + \list{\@biblabel{\@arabic\c at enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \@openbib at code + \usecounter{enumiv}% + \let\p at enumiv\@empty + \renewcommand\theenumiv{\@arabic\c at enumiv}}% + \bibfont + \clubpenalty4000 + \@clubpenalty \clubpenalty + \widowpenalty4000% + \sfcode`\.\@m} + {\def\@noitemerr + {\@latex at warning{Empty `thebibliography' environment}}% + \endlist} + +\if \@natbib + +\if \@authoryear + \typeout{Using natbib package with 'authoryear' citation style.} + \usepackage[authoryear,sort,square]{natbib} + \bibpunct{[}{]}{;}{a}{}{,} % Change citation separator to semicolon, + % eliminate comma between author and year. + \let \cite = \citep +\else + \typeout{Using natbib package with 'numbers' citation style.} + \usepackage[numbers,sort&compress,square]{natbib} +\fi +\setlength{\bibsep}{3pt plus .5pt minus .25pt} + +\fi + +\def \bibfont {\small} + +% Categories +% ---------- + + +\@setflag \@firstcategory = \@true + +\newcommand{\category}[3]{% + \if \@firstcategory + \paragraph*{Categories and Subject Descriptors}% + \@setflag \@firstcategory = \@false + \else + \unskip ;\hspace{.75em}% + \fi + \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}} + +\def \@category #1#2#3[#4]{% + {\let \and = \relax + #1 [\textit{#2}]% + \if \@emptyargp{#4}% + \if \@notp{\@emptyargp{#3}}: #3\fi + \else + :\space + \if \@notp{\@emptyargp{#3}}#3---\fi + \textrm{#4}% + \fi}} + +% Copyright Notice +% --------- ------ + + +\def \ftype at copyrightbox {8} +\def \@toappear {} +\def \@permission {} +\def \@reprintprice {} + +\def \@copyrightspace {% + \@float{copyrightbox}[b]% + \vbox to 1in{% + \vfill + \parbox[b]{20pc}{% + \scriptsize + \if \@preprint + [Copyright notice will appear here + once 'preprint' option is removed.]\par + \else + \@toappear + \fi + \if \@reprint + \noindent Reprinted from \@conferencename, + \@proceedings, + \@conferenceinfo, + pp.~\number\thepage--\pageref{sigplanconf at finalpage}.\par + \fi}}% + \end at float} + +\long\def \toappear #1{% + \def \@toappear {#1}} + +\toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}\quad \@conferenceinfo \par + \noindent Copyright \copyright\ \@copyrightyear\ ACM \@copyrightdata + \dots \@reprintprice\par} + +\newcommand{\permission}[1]{% + \gdef \@permission {#1}} + +\permission{% + Permission to make digital or hard copies of all or + part of this work for personal or classroom use is granted without + fee provided that copies are not made or distributed for profit or + commercial advantage and that 
copies bear this notice and the full + citation on the first page. To copy otherwise, to republish, to + post on servers or to redistribute to lists, requires prior specific + permission and/or a fee.} + +% Here we have some alternate permission statements and copyright lines: + +\newcommand{\ACMCanadapermission}{% + \permission{% + Copyright \@copyrightyear\ Association for Computing Machinery. + ACM acknowledges that + this contribution was authored or co-authored by an affiliate of the + National Research Council of Canada (NRC). + As such, the Crown in Right of + Canada retains an equal interest in the copyright, however granting + nonexclusive, royalty-free right to publish or reproduce this article, + or to allow others to do so, provided that clear attribution + is also given to the authors and the NRC.}} + +\newcommand{\ACMUSpermission}{% + \permission{% + Copyright \@copyrightyear\ Association for + Computing Machinery. ACM acknowledges that + this contribution was authored or co-authored + by a contractor or affiliate + of the U.S. Government. As such, the Government retains a nonexclusive, + royalty-free right to publish or reproduce this article, + or to allow others to do so, for Government purposes only.}} + +\newcommand{\authorpermission}{% + \permission{% + Copyright is held by the author/owner(s).} + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}\quad \@conferenceinfo \par + ACM \@copyrightdata.}} + +\newcommand{\Sunpermission}{% + \permission{% + Copyright is held by Sun Microsystems, Inc.}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}\quad \@conferenceinfo \par + ACM \@copyrightdata.}} + +\newcommand{\USpublicpermission}{% + \permission{% + This paper is authored by an employee(s) of the United States + Government and is in the public domain.}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}\quad \@conferenceinfo \par + ACM \@copyrightdata.}} + +\newcommand{\reprintprice}[1]{% + \gdef \@reprintprice {#1}} + +\reprintprice{\$10.00} + +% Enunciations +% ------------ + + +\def \@begintheorem #1#2{% {name}{number} + \trivlist + \item[\hskip \labelsep \textsc{#1 #2.}]% + \itshape\selectfont + \ignorespaces} + +\def \@opargbegintheorem #1#2#3{% {name}{number}{title} + \trivlist + \item[% + \hskip\labelsep \textsc{#1\ #2}% + \if \@notp{\@emptyargp{#3}}\nut (#3).\fi]% + \itshape\selectfont + \ignorespaces} + +% Figures +% ------- + + +\@setflag \@caprule = \@true + +\long\def \@makecaption #1#2{% + \addvspace{4pt} + \if \@caprule + \hrule width \hsize height .33pt + \vspace{4pt} + \fi + \setbox \@tempboxa = \hbox{\@setfigurenumber{#1.}\nut #2}% + \if \@dimgtrp{\wd\@tempboxa}{\hsize}% + \noindent \@setfigurenumber{#1.}\nut #2\par + \else + \centerline{\box\@tempboxa}% + \fi} + +\newcommand{\nocaptionrule}{% + \@setflag \@caprule = \@false} + +\def \@setfigurenumber #1{% + {\rmfamily \bfseries \selectfont #1}} + +% Hierarchy +% --------- + + +\setcounter{secnumdepth}{\@numheaddepth} + +\newskip{\@sectionaboveskip} +\setvspace{\@sectionaboveskip}{10pt plus 3pt minus 2pt} + +\newskip{\@sectionbelowskip} +\if \@blockstyle + \setlength{\@sectionbelowskip}{0.1pt}% +\else + \setlength{\@sectionbelowskip}{4pt}% +\fi + +\renewcommand{\section}{% + \@startsection + {section}% + {1}% + {0pt}% + {-\@sectionaboveskip}% + {\@sectionbelowskip}% + {\large \bfseries \raggedright}} + +\newskip{\@subsectionaboveskip} 
+\setvspace{\@subsectionaboveskip}{8pt plus 2pt minus 2pt} + +\newskip{\@subsectionbelowskip} +\if \@blockstyle + \setlength{\@subsectionbelowskip}{0.1pt}% +\else + \setlength{\@subsectionbelowskip}{4pt}% +\fi + +\renewcommand{\subsection}{% + \@startsection% + {subsection}% + {2}% + {0pt}% + {-\@subsectionaboveskip}% + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\renewcommand{\subsubsection}{% + \@startsection% + {subsubsection}% + {3}% + {0pt}% + {-\@subsectionaboveskip} + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\newskip{\@paragraphaboveskip} +\setvspace{\@paragraphaboveskip}{6pt plus 2pt minus 2pt} + +\renewcommand{\paragraph}{% + \@startsection% + {paragraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \bfseries \if \@times \itshape \fi}} + +\renewcommand{\subparagraph}{% + \@startsection% + {subparagraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \itshape}} + +% Standard headings: + +\newcommand{\acks}{\section*{Acknowledgments}} + +\newcommand{\keywords}{\paragraph*{Keywords}} + +\newcommand{\terms}{\paragraph*{General Terms}} + +% Identification +% -------------- + + +\def \@conferencename {} +\def \@conferenceinfo {} +\def \@copyrightyear {} +\def \@copyrightdata {[to be supplied]} +\def \@proceedings {[Unknown Proceedings]} + + +\newcommand{\conferenceinfo}[2]{% + \gdef \@conferencename {#1}% + \gdef \@conferenceinfo {#2}} + +\newcommand{\copyrightyear}[1]{% + \gdef \@copyrightyear {#1}} + +\let \CopyrightYear = \copyrightyear + +\newcommand{\copyrightdata}[1]{% + \gdef \@copyrightdata {#1}} + +\let \crdata = \copyrightdata + +\newcommand{\proceedings}[1]{% + \gdef \@proceedings {#1}} + +% Lists +% ----- + + +\setlength{\leftmargini}{13pt} +\setlength\leftmarginii{13pt} +\setlength\leftmarginiii{13pt} +\setlength\leftmarginiv{13pt} +\setlength{\labelsep}{3.5pt} + +\setlength{\topsep}{\standardvspace} +\if \@blockstyle + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\else + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\fi + +\renewcommand{\labelitemi}{{\small \centeroncapheight{\textbullet}}} +\renewcommand{\labelitemii}{\centeroncapheight{\rule{2.5pt}{2.5pt}}} +\renewcommand{\labelitemiii}{$-$} +\renewcommand{\labelitemiv}{{\Large \textperiodcentered}} + +\renewcommand{\@listi}{% + \leftmargin = \leftmargini + \listparindent = 0pt} +%%% \itemsep = 1pt +%%% \parsep = 3pt} +%%% \listparindent = \parindent} + +\let \@listI = \@listi + +\renewcommand{\@listii}{% + \leftmargin = \leftmarginii + \topsep = 1pt + \labelwidth = \leftmarginii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiii}{% + \leftmargin = \leftmarginiii + \labelwidth = \leftmarginiii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiv}{% + \leftmargin = \leftmarginiv + \labelwidth = \leftmarginiv + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +% Mathematics +% ----------- + + +\def \theequation {\arabic{equation}} + +% Miscellaneous +% ------------- + + +\newcommand{\balancecolumns}{% + \vfill\eject + \global\@colht = \textheight + \global\ht\@cclv = \textheight} + +\newcommand{\nut}{\hspace{.5em}} + +\newcommand{\softraggedright}{% + \let \\ = \@centercr + \leftskip = 0pt + \rightskip = 0pt plus 10pt} + +% Program Code +% ------- ---- + + +\newcommand{\mono}[1]{% + {\@tempdima = \fontdimen2\font + \texttt{\spaceskip = 1.1\@tempdima #1}}} + +% Running Heads and Feet +% ------- ----- --- ---- + + 
+\def \@preprintfooter {} + +\newcommand{\preprintfooter}[1]{% + \gdef \@preprintfooter {#1}} + +\if \@preprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize \textit{\@preprintfooter}\hfil \thepage \hfil + \textit{\@formatyear}}% + \let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else\if \@reprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize \hfil \thepage \hfil}% + \let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else + +\let \ps at plain = \ps at empty +\let \ps at headings = \ps at empty +\let \ps at myheadings = \ps at empty + +\fi\fi + +\def \@formatyear {% + \number\year/\number\month/\number\day} + +% Special Characters +% ------- ---------- + + +\DeclareRobustCommand{\euro}{% + \protect{\rlap{=}}{\sf \kern .1em C}} + +% Title Page +% ----- ---- + + +\@setflag \@addauthorsdone = \@false + +\def \@titletext {\@latex at error{No title was provided}{}} +\def \@subtitletext {} + +\newcount{\@authorcount} + +\newcount{\@titlenotecount} +\newtoks{\@titlenotetext} + +\def \@titlebanner {} + +\renewcommand{\title}[1]{% + \gdef \@titletext {#1}} + +\newcommand{\subtitle}[1]{% + \gdef \@subtitletext {#1}} + +\newcommand{\authorinfo}[3]{% {names}{affiliation}{email/URL} + \global\@increment \@authorcount + \@withname\gdef {\@authorname\romannumeral\@authorcount}{#1}% + \@withname\gdef {\@authoraffil\romannumeral\@authorcount}{#2}% + \@withname\gdef {\@authoremail\romannumeral\@authorcount}{#3}} + +\renewcommand{\author}[1]{% + \@latex at error{The \string\author\space command is obsolete; + use \string\authorinfo}{}} + +\newcommand{\titlebanner}[1]{% + \gdef \@titlebanner {#1}} + +\renewcommand{\maketitle}{% + \pagestyle{plain}% + \if \@onecolumn + {\hsize = \standardtextwidth + \@maketitle}% + \else + \twocolumn[\@maketitle]% + \fi + \@placetitlenotes + \if \@copyrightwanted \@copyrightspace \fi} + +\def \@maketitle {% + \begin{center} + \@settitlebanner + \let \thanks = \titlenote + {\leftskip = 0pt plus 0.25\linewidth + \rightskip = 0pt plus 0.25 \linewidth + \parfillskip = 0pt + \spaceskip = .7em + \noindent \LARGE \bfseries \@titletext \par} + \vskip 6pt + \noindent \Large \@subtitletext \par + \vskip 12pt + \ifcase \@authorcount + \@latex at error{No authors were specified for this paper}{}\or + \@titleauthors{i}{}{}\or + \@titleauthors{i}{ii}{}\or + \@titleauthors{i}{ii}{iii}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{xii}% + \else + \@latex at error{Cannot handle more than 12 authors}{}% + \fi + \vspace{1.75pc} + \end{center}} + +\def \@settitlebanner {% + \if \@andp{\@preprint}{\@notp{\@emptydefp{\@titlebanner}}}% + \vbox to 0pt{% + \vskip -32pt + \noindent \textbf{\@titlebanner}\par + \vss}% + \nointerlineskip + \fi} + 
+\def \@titleauthors #1#2#3{% + \if \@andp{\@emptyargp{#2}}{\@emptyargp{#3}}% + \noindent \@setauthor{40pc}{#1}{\@false}\par + \else\if \@emptyargp{#3}% + \noindent \@setauthor{17pc}{#1}{\@false}\hspace{3pc}% + \@setauthor{17pc}{#2}{\@false}\par + \else + \noindent \@setauthor{12.5pc}{#1}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#2}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#3}{\@true}\par + \relax + \fi\fi + \vspace{20pt}} + +\def \@setauthor #1#2#3{% {width}{text}{unused} + \vtop{% + \def \and {% + \hspace{16pt}} + \hsize = #1 + \normalfont + \centering + \large \@name{\@authorname#2}\par + \vspace{5pt} + \normalsize \@name{\@authoraffil#2}\par + \vspace{2pt} + \textsf{\@name{\@authoremail#2}}\par}} + +\def \@maybetitlenote #1{% + \if \@andp{#1}{\@gtrp{\@authorcount}{3}}% + \titlenote{See page~\pageref{@addauthors} for additional authors.}% + \fi} + +\newtoks{\@fnmark} + +\newcommand{\titlenote}[1]{% + \global\@increment \@titlenotecount + \ifcase \@titlenotecount \relax \or + \@fnmark = {\ast}\or + \@fnmark = {\dagger}\or + \@fnmark = {\ddagger}\or + \@fnmark = {\S}\or + \@fnmark = {\P}\or + \@fnmark = {\ast\ast}% + \fi + \,$^{\the\@fnmark}$% + \edef \reserved at a {\noexpand\@appendtotext{% + \noexpand\@titlefootnote{\the\@fnmark}}}% + \reserved at a{#1}} + +\def \@appendtotext #1#2{% + \global\@titlenotetext = \expandafter{\the\@titlenotetext #1{#2}}} + +\newcount{\@authori} + +\iffalse +\def \additionalauthors {% + \if \@gtrp{\@authorcount}{3}% + \section{Additional Authors}% + \label{@addauthors}% + \noindent + \@authori = 4 + {\let \\ = ,% + \loop + \textbf{\@name{\@authorname\romannumeral\@authori}}, + \@name{\@authoraffil\romannumeral\@authori}, + email: \@name{\@authoremail\romannumeral\@authori}.% + \@increment \@authori + \if \@notp{\@gtrp{\@authori}{\@authorcount}} \repeat}% + \par + \fi + \global\@setflag \@addauthorsdone = \@true} +\fi + +\let \addauthorsection = \additionalauthors + +\def \@placetitlenotes { + \the\@titlenotetext} + +% Utilities +% --------- + + +\newcommand{\centeroncapheight}[1]{% + {\setbox\@tempboxa = \hbox{#1}% + \@measurecapheight{\@tempdima}% % Calculate ht(CAP) - ht(text) + \advance \@tempdima by -\ht\@tempboxa % ------------------ + \divide \@tempdima by 2 % 2 + \raise \@tempdima \box\@tempboxa}} + +\newbox{\@measbox} + +\def \@measurecapheight #1{% {\dimen} + \setbox\@measbox = \hbox{ABCDEFGHIJKLMNOPQRSTUVWXYZ}% + #1 = \ht\@measbox} + +\long\def \@titlefootnote #1#2{% + \insert\footins{% + \reset at font\footnotesize + \interlinepenalty\interfootnotelinepenalty + \splittopskip\footnotesep + \splitmaxdepth \dp\strutbox \floatingpenalty \@MM + \hsize\columnwidth \@parboxrestore +%%% \protected at edef\@currentlabel{% +%%% \csname p at footnote\endcsname\@thefnmark}% + \color at begingroup + \def \@makefnmark {$^{#1}$}% + \@makefntext{% + \rule\z@\footnotesep\ignorespaces#2\@finalstrut\strutbox}% + \color at endgroup}} + +% LaTeX Modifications +% ----- ------------- + +\def \@seccntformat #1{% + \@name{\the#1}% + \@expandaftertwice\@seccntformata \csname the#1\endcsname.\@mark + \quad} + +\def \@seccntformata #1.#2\@mark{% + \if \@emptyargp{#2}.\fi} + +% Revision History +% -------- ------- + + +% Date Person Ver. Change +% ---- ------ ---- ------ + +% 2004.09.12 PCA 0.1--5 Preliminary development. + +% 2004.11.18 PCA 0.5 Start beta testing. + +% 2004.11.19 PCA 0.6 Obsolete \author and replace with +% \authorinfo. +% Add 'nocopyrightspace' option. +% Compress article opener spacing. +% Add 'mathtime' option. +% Increase text height by 6 points. 
+ +% 2004.11.28 PCA 0.7 Add 'cm/computermodern' options. +% Change default to Times text. + +% 2004.12.14 PCA 0.8 Remove use of mathptm.sty; it cannot +% coexist with latexsym or amssymb. + +% 2005.01.20 PCA 0.9 Rename class file to sigplanconf.cls. + +% 2005.03.05 PCA 0.91 Change default copyright data. + +% 2005.03.06 PCA 0.92 Add at-signs to some macro names. + +% 2005.03.07 PCA 0.93 The 'onecolumn' option defaults to '11pt', +% and it uses the full type width. + +% 2005.03.15 PCA 0.94 Add at-signs to more macro names. +% Allow margin paragraphs during review. + +% 2005.03.22 PCA 0.95 Implement \euro. +% Remove proof and newdef environments. + +% 2005.05.06 PCA 1.0 Eliminate 'onecolumn' option. +% Change footer to small italic and eliminate +% left portion if no \preprintfooter. +% Eliminate copyright notice if preprint. +% Clean up and shrink copyright box. + +% 2005.05.30 PCA 1.1 Add alternate permission statements. + +% 2005.06.29 PCA 1.1 Publish final first edition of guide. + +% 2005.07.14 PCA 1.2 Add \subparagraph. +% Use block paragraphs in lists, and adjust +% spacing between items and paragraphs. + +% 2006.06.22 PCA 1.3 Add 'reprint' option and associated +% commands. + +% 2006.08.24 PCA 1.4 Fix bug in \maketitle case command. + +% 2007.03.13 PCA 1.5 The title banner only displays with the +% 'preprint' option. + +% 2007.06.06 PCA 1.6 Use \bibfont in \thebibliography. +% Add 'natbib' option to load and configure +% the natbib package. + +% 2007.11.20 PCA 1.7 Balance line lengths in centered article +% title (thanks to Norman Ramsey). + +% 2009.01.26 PCA 1.8 Change natbib \bibpunct values. + +% 2009.03.24 PCA 1.9 Change natbib to use the 'numbers' option. +% Change templates to use 'natbib' option. + +% 2009.09.01 PCA 2.0 Add \reprintprice command (suggested by +% Stephen Chong). + +% 2009.09.08 PCA 2.1 Make 'natbib' the default; add 'nonatbib'. +% SB Add 'authoryear' and 'numbers' (default) to +% control citation style when using natbib. +% Add \bibpunct to change punctuation for +% 'authoryear' style. + +% 2009.09.21 PCA 2.2 Add \softraggedright to the thebibliography +% environment. Also add to template so it will +% happen with natbib. + +% 2009.09.30 PCA 2.3 Remove \softraggedright from thebibliography. +% Just include in the template. 
+

From cfbolz at codespeak.net  Fri Sep 17 13:37:34 2010
From: cfbolz at codespeak.net (cfbolz at codespeak.net)
Date: Fri, 17 Sep 2010 13:37:34 +0200 (CEST)
Subject: [pypy-svn] r77132 - pypy/extradoc/talk/pepm2011
Message-ID: <20100917113734.E9C92282B9E@codespeak.net>

Author: cfbolz
Date: Fri Sep 17 13:37:33 2010
New Revision: 77132

Modified:
   pypy/extradoc/talk/pepm2011/paper.tex
Log:
fix

Modified: pypy/extradoc/talk/pepm2011/paper.tex
==============================================================================
--- pypy/extradoc/talk/pepm2011/paper.tex	(original)
+++ pypy/extradoc/talk/pepm2011/paper.tex	Fri Sep 17 13:37:33 2010
@@ -113,6 +113,6 @@
 \label{sec:conclusions}
 
 \bibliographystyle{abbrv}
-\bibliography{paper,michael}
+\bibliography{paper}
 
 \end{document}

From cfbolz at codespeak.net  Fri Sep 17 13:57:26 2010
From: cfbolz at codespeak.net (cfbolz at codespeak.net)
Date: Fri, 17 Sep 2010 13:57:26 +0200 (CEST)
Subject: [pypy-svn] r77133 - pypy/extradoc/talk/pepm2011
Message-ID: <20100917115726.448B436C21D@codespeak.net>

Author: cfbolz
Date: Fri Sep 17 13:57:11 2010
New Revision: 77133

Modified:
   pypy/extradoc/talk/pepm2011/paper.tex
Log:
paperify the blog post

Modified: pypy/extradoc/talk/pepm2011/paper.tex
==============================================================================
--- pypy/extradoc/talk/pepm2011/paper.tex	(original)
+++ pypy/extradoc/talk/pepm2011/paper.tex	Fri Sep 17 13:57:11 2010
@@ -91,6 +91,36 @@
 
 \section{Introduction}
 
+The goal of a just-in-time compiler for a dynamic language is obviously to
+improve the speed of the language over an implementation that
+uses interpretation. The first goal of a JIT is thus to remove the
+interpretation overhead, i.e. the overhead of bytecode (or AST) dispatch and the
+overhead of the interpreter's data structures, such as the operand stack. The
+second important problem that any JIT for a dynamic language needs to solve is
+how to deal with the overhead of boxing primitive types and of type
+dispatching. These problems are usually not present in statically typed
+languages.
+
+Boxing of primitive types means that dynamic languages need to be able to handle
+all objects, even integers, floats, etc., in the same way as user-defined
+instances. Thus those primitive types are usually \emph{boxed}, i.e. a small
+heap structure that contains the actual value is allocated for them.
+
+Type dispatching is the process of finding the concrete implementation that is
+applicable to the objects at hand when performing a generic operation. An
+example would be the addition of two objects: the addition needs to check what
+the concrete objects to be added are, and choose the implementation
+that fits them.
+
+Last year, we wrote a paper \cite{XXX} about how PyPy's meta-JIT
+approach works. That paper explains how the meta-tracing JIT can remove the overhead
+of bytecode dispatch. In this paper we want to explain how the traces that are
+produced by our meta-tracing JIT are then optimized to also remove some of the
+overhead more closely associated with dynamic languages, such as boxing overhead
+and type dispatching. The most important technique to achieve this is a form of
+escape analysis \cite{XXX} that we call \emph{virtual objects}. This is best
+explained via an example.
+ \section{Background} \label{sec:Background} @@ -100,6 +130,278 @@ \subsection{Tracing JIT Compilers} \label{sub:JIT_background} +\section{Escape Analysis in a Tracing JIT} +\label{sec:Escape Analysis in a Tracing JIT} + +\subsection{Running Example} + +For the purpose of this paper, we are going to use a very simple object +model, that just supports an integer and a float type. The objects support only +two operations, \texttt{add}, which adds two objects (promoting ints to floats in a +mixed addition) and \texttt{is\_positive}, which returns whether the number is greater +than zero. The implementation of \texttt{add} uses classical Smalltalk-like +double-dispatching. These classes could be part of the implementation of a very +simple interpreter written in RPython. + +\begin{verbatim} +class Base(object): + def add(self, other): + """ add self to other """ + raise NotImplementedError("abstract base") + def add__int(self, intother): + """ add intother to self, where intother is a Python integer """ + raise NotImplementedError("abstract base") + def add__float(self, floatother): + """ add floatother to self, where floatother is a Python float """ + raise NotImplementedError("abstract base") + def is_positive(self): + """ returns whether self is positive """ + raise NotImplementedError("abstract base") + +class BoxedInteger(Base): + def __init__(self, intval): + self.intval = intval + def add(self, other): + return other.add__int(self.intval) + def add__int(self, intother): + return BoxedInteger(intother + self.intval) + def add__float(self, floatother): + return BoxedFloat(floatother + float(self.intval)) + def is_positive(self): + return self.intval > 0 + +class BoxedFloat(Base): + def __init__(self, floatval): + self.floatval = floatval + def add(self, other): + return other.add__float(self.floatval) + def add__int(self, intother): + return BoxedFloat(float(intother) + self.floatval) + def add__float(self, floatother): + return BoxedFloat(floatother + self.floatval) + def is_positive(self): + return self.floatval > 0.0 +\end{verbatim} + +Using these classes to implement arithmetic shows the basic problem that a +dynamic language implementation has. All the numbers are instances of either +\texttt{BoxedInteger} or \texttt{BoxedFloat}, thus they consume space on the +heap. Performing many arithmetic operations produces lots of garbage quickly, +thus putting pressure on the garbage collector. Using double dispatching to +implement the numeric tower needs two method calls per arithmetic operation, +which is costly due to the method dispatch. + +To understand the problems more directly, let us consider a simple function +that uses the object model: + +\begin{verbatim} +def f(y): + res = BoxedInteger(0) + while y.is_positive(): + res = res.add(y).add(BoxedInteger(-100)) + y = y.add(BoxedInteger(-1)) + return res +\end{verbatim} + +The loop iterates \texttt{y} times, and computes something in the process. 
To +understand the reason why executing this function is slow, here is the trace +that is produced by the tracing JIT when executing the function with \texttt{y} +being a \texttt{BoxedInteger}: + +\begin{verbatim} +# arguments to the trace: p0, p1 +# inside f: res.add(y) +guard_class(p1, BoxedInteger) + # inside BoxedInteger.add + i2 = getfield_gc(p1, intval) + guard_class(p0, BoxedInteger) + # inside BoxedInteger.add__int + i3 = getfield_gc(p0, intval) + i4 = int_add(i2, i3) + p5 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p5, i4, intval) +# inside f: BoxedInteger(-100) +p6 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p6, -100, intval) + +# inside f: .add(BoxedInteger(-100)) +guard_class(p5, BoxedInteger) + # inside BoxedInteger.add + i7 = getfield_gc(p5, intval) + guard_class(p6, BoxedInteger) + # inside BoxedInteger.add__int + i8 = getfield_gc(p6, intval) + i9 = int_add(i7, i8) + p10 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p10, i9, intval) + +# inside f: BoxedInteger(-1) +p11 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p11, -1, intval) + +# inside f: y.add(BoxedInteger(-1)) +guard_class(p0, BoxedInteger) + # inside BoxedInteger.add + i12 = getfield_gc(p0, intval) + guard_class(p11, BoxedInteger) + # inside BoxedInteger.add__int + i13 = getfield_gc(p11, intval) + i14 = int_add(i12, i13) + p15 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p15, i14, intval) + +# inside f: y.is_positive() +guard_class(p15, BoxedInteger) + # inside BoxedInteger.is_positive + i16 = getfield_gc(p15, intval) + i17 = int_gt(i16, 0) +# inside f +guard_true(i17) +jump(p15, p10) +\end{verbatim} + +(indentation corresponds to the stack level of the traced functions). + +The trace is inefficient for a couple of reasons. One problem is that it checks +repeatedly and redundantly for the class of the objects around, using a +\texttt{guard\_class} instruction. In addition, some new \texttt{BoxedInteger} instances are +constructed using the \texttt{new} operation, only to be used once and then forgotten +a bit later. In the next section, we will see how this can be improved upon, +using escape analysis. + +\subsection{Virtual Objects} + +The main insight to improve the code shown in the last section is that some of +the objects created in the trace using a \texttt{new} operation don't survive very +long and are collected by the garbage collector soon after their allocation. +Moreover, they are used only inside the loop, thus we can easily prove that +nobody else in the program stores a reference to them. The +idea for improving the code is thus to analyze which objects never escape the +loop and may thus not be allocated at all. + +This process is called \emph{escape analysis}. The escape analysis of +our tracing JIT works by using \emph{virtual objects}: The trace is walked from +beginning to end and whenever a \texttt{new} operation is seen, the operation is +removed and a virtual object is constructed. The virtual object summarizes the +shape of the object that is allocated at this position in the original trace, +and is used by the escape analysis to improve the trace. The shape describes +where the values that would be stored in the fields of the allocated objects +come from. Whenever the optimizer sees a \texttt{setfield} that writes into a virtual +object, that shape summary is thus updated and the operation can be removed. 
+When the optimizer encounters a \texttt{getfield} from a virtual, the result is read +from the virtual object, and the operation is also removed. + +In the example from the last section, the following operations would produce two +virtual objects, and be completely removed from the optimized trace: + +\begin{verbatim} +p5 = new(BoxedInteger) +setfield_gc(p5, i4, intval) +p6 = new(BoxedInteger) +setfield_gc(p6, -100, intval) +\end{verbatim} + + +The virtual object stored in \texttt{p5} would know that it is a \texttt{BoxedInteger}, and that +the \texttt{intval} field contains \texttt{i4}; the one stored in \texttt{p6} would know that +its \texttt{intval} field contains the constant -100. + +The following operations, which use \texttt{p5} and \texttt{p6}, could then be +optimized using that knowledge: + +\begin{verbatim} +guard_class(p5, BoxedInteger) +i7 = getfield_gc(p5, intval) +# inside BoxedInteger.add +guard_class(p6, BoxedInteger) +# inside BoxedInteger.add__int +i8 = getfield_gc(p6, intval) +i9 = int_add(i7, i8) +\end{verbatim} + +The \texttt{guard\_class} operations can be removed, because the classes of \texttt{p5} and +\texttt{p6} are known to be \texttt{BoxedInteger}. The \texttt{getfield\_gc} operations can be removed +and \texttt{i7} and \texttt{i8} are just replaced by \texttt{i4} and -100. Thus the only +remaining operation in the optimized trace would be: + +\begin{verbatim} +i9 = int_add(i4, -100) +\end{verbatim} + +The rest of the trace is optimized similarly. + +So far we have only described what happens when virtual objects are used in +operations that read and write their fields. When the virtual object is used in +any other operation, it cannot stay virtual. For example, when a virtual object +is stored in a globally accessible place, the object needs to actually be +allocated, as it will live longer than one iteration of the loop. + +This is what happens at the end of the trace above, when the \texttt{jump} operation +is hit. The arguments of the jump are at this point virtual objects. Before the +jump is emitted, they are \emph{forced}. This means that the optimizer produces code +that allocates a new object of the right type and sets its fields to the field +values that the virtual object has. Thus, instead of the jump, the +following operations are emitted: + +\begin{verbatim} +p15 = new(BoxedInteger) +setfield_gc(p15, i14, intval) +p10 = new(BoxedInteger) +setfield_gc(p10, i9, intval) +jump(p15, p10) +\end{verbatim} + +Note how the operations for creating these two instances have been moved down the +trace. It may look like we did not win much for these operations, because +the objects are still allocated at the end. However, the optimization was still +worthwhile even in this case, because some operations that have been performed +on the forced virtual objects have been removed (some \texttt{getfield\_gc} operations +and \texttt{guard\_class} operations).
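As a rough illustration of the bookkeeping described above (a hypothetical sketch in plain Python; the tuple encoding of operations is invented here and this is not the PyPy optimizer), a virtual object can be modelled as a shape summary that absorbs setfield/getfield operations and is only turned back into explicit allocation operations when it is forced:

class VirtualObject(object):
    """Shape summary for an allocation that was removed from the trace."""
    def __init__(self, known_class):
        self.known_class = known_class   # known statically, so guard_class on it can be dropped
        self.fields = {}                 # field name -> value the removed setfield stored

    def setfield(self, name, value):
        # a setfield_gc on a virtual only updates the summary; no operation is emitted
        self.fields[name] = value

    def getfield(self, name):
        # a getfield_gc on a virtual is answered from the summary; no operation is emitted
        return self.fields[name]

    def force(self, varname, emit):
        # materialize the object just before it escapes, e.g. as an argument of the jump
        emit(('new', varname, self.known_class))
        for name, value in self.fields.items():
            emit(('setfield_gc', varname, name, value))

# usage, mirroring p10 from the trace above
newops = []
p10 = VirtualObject('BoxedInteger')
p10.setfield('intval', 'i9')
p10.force('p10', newops.append)
assert newops == [('new', 'p10', 'BoxedInteger'), ('setfield_gc', 'p10', 'intval', 'i9')]

The actual optimizer works on ResOperation instances and has to handle many more cases (guards, aliasing, virtuals referencing other virtuals), but the per-object state it tracks is essentially this kind of shape summary.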
+ +The final optimized trace of the example looks like this: + +\begin{verbatim} +# arguments to the trace: p0, p1 +guard_class(p1, BoxedInteger) +i2 = getfield_gc(p1, intval) +guard_class(p0, BoxedInteger) +i3 = getfield_gc(p0, intval) +i4 = int_add(i2, i3) +i9 = int_add(i4, -100) + +guard_class(p0, BoxedInteger) +i12 = getfield_gc(p0, intval) +i14 = int_add(i12, -1) + +i17 = int_gt(i14, 0) +guard_true(i17) +p15 = new(BoxedInteger) +setfield_gc(p15, i14, intval) +p10 = new(BoxedInteger) +setfield_gc(p10, i9, intval) +jump(p15, p10) +\end{verbatim} + +The optimized trace contains only two allocations, instead of the original five, +and only three \texttt{guard\_class} operations, from the original seven. + + +%___________________________________________________________________________ + +\subsection{Summary} + +In this section we described how simple escape analysis works within the scope of one +loop. This optimization reduces the allocation of many intermediate data +structures that become garbage quickly in an interpreter. It also removes a lot +of the type dispatching overhead. In the next section, we will explain how this +optimization can be improved further. + +% section Escape Analysis in a Tracing JIT (end) \section{Evaluation} From antocuni at codespeak.net Fri Sep 17 14:29:51 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 14:29:51 +0200 (CEST) Subject: [pypy-svn] r77134 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/cli backend/llgraph backend/llsupport backend/llsupport/test backend/llvm backend/test backend/x86 metainterp metainterp/optimizeopt metainterp/test tool Message-ID: <20100917122951.61849282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 14:29:40 2010 New Revision: 77134 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py pypy/branch/resoperation-refactoring/pypy/jit/tool/showstats.py Log: (david, antocuni): introduce an official interface to get the opnum, and remove the old "opnum" attribute (well, not so far :-)) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py Fri Sep 17 14:29:40 2010 @@ -207,7 +207,7 @@ def _collect_types(self, operations, box2classes): for op in operations: - if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC): + if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): box = op.args[0] descr = op.descr assert isinstance(descr, runner.FieldDescr) @@ -335,7 +335,7 @@ while self.i < N: op = oplist[self.i] self.emit_debug(op.repr()) - func = self.operations[op.opnum] + func = self.operations[op.getopnum()] assert func is not None func(self, op) self.i += 1 @@ -410,7 +410,7 @@ def emit_ovf_op(self, op, emit_op): next_op = self.oplist[self.i+1] - if next_op.opnum == rop.GUARD_NO_OVERFLOW: + if next_op.getopnum() == rop.GUARD_NO_OVERFLOW: self.i += 1 self.emit_ovf_op_and_guard(op, next_op, emit_op) return Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py Fri Sep 17 14:29:40 2010 @@ -151,11 +151,11 @@ def _compile_operations(self, c, operations, var2index): for op in operations: - llimpl.compile_add(c, op.opnum) + llimpl.compile_add(c, op.getopnum()) descr = op.descr if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) - if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP: + if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython @@ -204,12 +204,12 @@ x)) op = operations[-1] assert op.is_final() - if op.opnum == rop.JUMP: + if op.getopnum() == rop.JUMP: targettoken = op.descr assert isinstance(targettoken, history.LoopToken) compiled_version = targettoken._llgraph_compiled_version llimpl.compile_add_jump_target(c, compiled_version) - elif op.opnum == rop.FINISH: + elif op.getopnum() == rop.FINISH: faildescr = op.descr index = self.get_fail_descr_number(faildescr) llimpl.compile_add_fail(c, index) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py Fri Sep 17 14:29:40 2010 @@ -559,7 +559,7 @@ # newops = [] for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: continue # ---------- replace ConstPtrs with GETFIELD_RAW ---------- # xxx some performance issue here @@ -576,7 +576,7 @@ self.single_gcref_descr)) op.setarg(i, box) # ---------- write barrier for SETFIELD_GC ---------- - if op.opnum == rop.SETFIELD_GC: + if op.getopnum() == rop.SETFIELD_GC: v = op.getarg(1) if isinstance(v, BoxPtr) or 
(isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL @@ -584,7 +584,7 @@ op = ResOperation(rop.SETFIELD_RAW, op._args, None, descr=op.descr) # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.opnum == rop.SETARRAYITEM_GC: + if op.getopnum() == rop.SETARRAYITEM_GC: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py Fri Sep 17 14:29:40 2010 @@ -258,7 +258,7 @@ gc_ll_descr._gen_write_barrier(newops, v_base, v_value) assert llop1.record == [] assert len(newops) == 1 - assert newops[0].opnum == rop.COND_CALL_GC_WB + assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base assert newops[0].getarg(1) == v_value assert newops[0].result is None @@ -298,12 +298,12 @@ gc_ll_descr.gcrefs = MyFakeGCRefList() gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations) assert len(operations) == 2 - assert operations[0].opnum == rop.GETFIELD_RAW + assert operations[0].getopnum() == rop.GETFIELD_RAW assert operations[0].getarg(0) == ConstInt(43) assert operations[0].descr == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) - assert operations[1].opnum == rop.PTR_EQ + assert operations[1].getopnum() == rop.PTR_EQ assert operations[1].getarg(0) == v_random_box assert operations[1].getarg(1) == v_box assert operations[1].result == v_result @@ -337,7 +337,7 @@ finally: rgc.can_move = old_can_move assert len(operations) == 1 - assert operations[0].opnum == rop.PTR_EQ + assert operations[0].getopnum() == rop.PTR_EQ assert operations[0].getarg(0) == v_random_box assert operations[0].getarg(1) == ConstPtr(s_gcref) assert operations[0].result == v_result @@ -358,12 +358,12 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB + assert operations[0].getopnum() == rop.COND_CALL_GC_WB assert operations[0].getarg(0) == v_base assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETFIELD_RAW + assert operations[1].getopnum() == rop.SETFIELD_RAW assert operations[1].getarg(0) == v_base assert operations[1].getarg(1) == v_value assert operations[1].descr == field_descr @@ -382,12 +382,12 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB + assert operations[0].getopnum() == rop.COND_CALL_GC_WB assert operations[0].getarg(0) == v_base assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETARRAYITEM_RAW + assert operations[1].getopnum() == rop.SETARRAYITEM_RAW assert operations[1].getarg(0) == v_base assert operations[1].getarg(1) == v_index assert operations[1].getarg(2) == v_value Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py Fri Sep 17 14:29:40 2010 @@ -107,7 
+107,7 @@ # store away the exception into self.backup_exc_xxx, *unless* the # branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION. if exc: - opnum = operations[0].opnum + opnum = operations[0].getopnum() if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): self._store_away_exception() # Normal handling of the operations follows. @@ -115,7 +115,7 @@ self._generate_op(op) def _generate_op(self, op): - opnum = op.opnum + opnum = op.getopnum() for i, name in all_operations: if opnum == i: meth = getattr(self, name) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py Fri Sep 17 14:29:40 2010 @@ -580,7 +580,7 @@ assert self.should_fail_by.fail_args is not None return self.should_fail_by.fail_args else: - assert self.should_fail_by.opnum == rop.FINISH + assert self.should_fail_by.getopnum() == rop.FINISH return self.should_fail_by.getarglist() def clear_state(self): @@ -620,7 +620,7 @@ exc = cpu.grab_exc_value() if (self.guard_op is not None and self.guard_op.is_guard_exception()): - if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION: + if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: assert exc else: assert not exc Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py Fri Sep 17 14:29:40 2010 @@ -389,7 +389,7 @@ def _find_debug_merge_point(self, operations): for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: funcname = op.getarg(0)._get_str() break else: @@ -681,10 +681,10 @@ self.mc.POP(loc) def regalloc_perform(self, op, arglocs, resloc): - genop_list[op.opnum](self, op, arglocs, resloc) + genop_list[op.getopnum()](self, op, arglocs, resloc) def regalloc_perform_discard(self, op, arglocs): - genop_discard_list[op.opnum](self, op, arglocs) + genop_discard_list[op.getopnum()](self, op, arglocs) def regalloc_perform_with_guard(self, op, guard_op, faillocs, arglocs, resloc, current_depths): @@ -692,14 +692,14 @@ assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths failargs = guard_op.fail_args - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, faildescr, failargs, faillocs) if op is None: dispatch_opnum = guard_opnum else: - dispatch_opnum = op.opnum + dispatch_opnum = op.getopnum() res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token, arglocs, resloc) faildescr._x86_adr_jump_offset = res @@ -755,7 +755,7 @@ def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: @@ -773,7 +773,7 @@ def _cmpop_guard_float(cond, false_cond, need_jp): def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() 
self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -942,7 +942,7 @@ genop_guard_float_ge = _cmpop_guard_float("AE", "B", False) def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -970,7 +970,7 @@ self.mc.CVTSI2SD(resloc, arglocs[0]) def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'Z') @@ -984,7 +984,7 @@ self.mc.MOVZX8(resloc, rl) def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'NZ') @@ -1216,7 +1216,7 @@ return addr def _gen_guard_overflow(self, guard_op, guard_token): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() if guard_opnum == rop.GUARD_NO_OVERFLOW: return self.implement_guard(guard_token, 'O') elif guard_opnum == rop.GUARD_OVERFLOW: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Fri Sep 17 14:29:40 2010 @@ -268,7 +268,7 @@ selected_reg, need_lower_byte) def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.opnum != rop.JUMP or jump.descr is not looptoken: + if jump.getopnum() != rop.JUMP or jump.descr is not looptoken: loop_consts = {} else: loop_consts = {} @@ -352,19 +352,19 @@ self.assembler.regalloc_perform_discard(op, arglocs) def can_merge_with_next_guard(self, op, i, operations): - if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER: - assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED + if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: + assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if not op.is_comparison(): if op.is_ovf(): - if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and - operations[i + 1].opnum != rop.GUARD_OVERFLOW): + if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and + operations[i + 1].getopnum() != rop.GUARD_OVERFLOW): print "int_xxx_ovf not followed by guard_(no)_overflow" raise AssertionError return True return False - if (operations[i + 1].opnum != rop.GUARD_TRUE and - operations[i + 1].opnum != rop.GUARD_FALSE): + if (operations[i + 1].getopnum() != rop.GUARD_TRUE and + operations[i + 1].getopnum() != rop.GUARD_FALSE): return False if operations[i + 1].getarg(0) is not op.result: return False @@ -385,10 +385,10 @@ self.possibly_free_vars_for_op(op) continue if self.can_merge_with_next_guard(op, i, operations): - oplist_with_guard[op.opnum](self, op, operations[i + 1]) + oplist_with_guard[op.getopnum()](self, op, operations[i + 1]) i += 1 else: - oplist[op.opnum](self, op) + oplist[op.getopnum()](self, op) if op.result is not None: self.possibly_free_var(op.result) self.rm._check_invariants() @@ -412,7 +412,7 @@ arg = op.getarg(j) if isinstance(arg, Box): if arg not 
in start_live: - print "Bogus arg in operation %d at %d" % (op.opnum, i) + print "Bogus arg in operation %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) if op.is_guard(): @@ -421,7 +421,7 @@ continue assert isinstance(arg, Box) if arg not in start_live: - print "Bogus arg in guard %d at %d" % (op.opnum, i) + print "Bogus arg in guard %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) for arg in inputargs: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Fri Sep 17 14:29:40 2010 @@ -234,10 +234,10 @@ def store_final_boxes(self, guard_op, boxes): guard_op.fail_args = boxes - self.guard_opnum = guard_op.opnum + self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): - assert guard_value_op.opnum == rop.GUARD_VALUE + assert guard_value_op.getopnum() == rop.GUARD_VALUE box = guard_value_op.getarg(0) try: i = guard_value_op.fail_args.index(box) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py Fri Sep 17 14:29:40 2010 @@ -76,7 +76,7 @@ for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: if not last_was_mergepoint: last_was_mergepoint = True self.mark_starter(graphindex, i) @@ -167,7 +167,7 @@ self.genedge((graphindex, opstartindex), (graphindex, opindex)) break - if op.opnum == rop.JUMP: + if op.getopnum() == rop.JUMP: tgt = op.descr tgt_g = -1 if tgt is None: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Fri Sep 17 14:29:40 2010 @@ -785,7 +785,7 @@ assert box not in seen seen[box] = True assert operations[-1].is_final() - if operations[-1].opnum == rop.JUMP: + if operations[-1].getopnum() == rop.JUMP: target = operations[-1].descr if target is not None: assert isinstance(target, LoopToken) @@ -811,7 +811,7 @@ return '<%s>' % (self.name,) def _list_all_operations(result, operations, omit_finish=True): - if omit_finish and operations[-1].opnum == rop.FINISH: + if omit_finish and operations[-1].getopnum() == rop.FINISH: # xxx obscure return result.extend(operations) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py Fri Sep 17 14:29:40 2010 @@ -79,7 +79,7 @@ debug_print('[' + args + ']') for i in range(len(operations)): op = operations[i] - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: loc = op.getarg(0)._get_str() debug_print("debug_merge_point('%s')" % (loc,)) continue 
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py Fri Sep 17 14:29:40 2010 @@ -144,7 +144,7 @@ def find_nodes(self, operations): for op in operations: - opnum = op.opnum + opnum = op.getopnum() for value, func in find_nodes_ops: if opnum == value: func(self, op) @@ -163,7 +163,7 @@ argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.descr) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping for i in range(op.numargs()): @@ -328,7 +328,7 @@ def show(self): from pypy.jit.metainterp.viewnode import viewnodes, view op = self._loop.operations[-1] - assert op.opnum == rop.JUMP + assert op.getopnum() == rop.JUMP exitnodes = [self.getnode(arg) for arg in op.args] viewnodes(self.inputnodes, exitnodes) if hasattr(self._loop.token, "specnodes"): @@ -347,7 +347,7 @@ # Build the list of specnodes based on the result # computed by NodeFinder.find_nodes(). op = loop.operations[-1] - assert op.opnum == rop.JUMP + assert op.getopnum() == rop.JUMP assert len(self.inputnodes) == op.numargs() while True: self.restart_needed = False Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py Fri Sep 17 14:29:40 2010 @@ -105,7 +105,7 @@ if op.is_guard(): self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return - opnum = op.opnum + opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or opnum == rop.SETARRAYITEM_GC or opnum == rop.DEBUG_MERGE_POINT): @@ -142,7 +142,7 @@ return self.force_all_lazy_setfields() elif op.is_final() or (not we_are_translated() and - op.opnum < 0): # escape() operations + op.getopnum() < 0): # escape() operations self.force_all_lazy_setfields() self.clean_caches() @@ -166,7 +166,7 @@ # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.opnum + opnum = prevop.getopnum() lastop_args = lastop.getarglist() if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) @@ -257,7 +257,7 @@ write=True) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py Fri Sep 17 14:29:40 2010 @@ -9,7 +9,7 @@ remove redundant guards""" def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -30,7 +30,7 @@ op = self.optimizer.producer[box] except KeyError: return - 
opnum = op.opnum + opnum = op.getopnum() for value, func in propagate_bounds_ops: if opnum == value: func(self, op) @@ -84,9 +84,9 @@ v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_ADD and remove guard - op.opnum = rop.INT_ADD + op._opnum = rop.INT_ADD self.skip_nextop() self.optimize_INT_ADD(op) else: @@ -99,9 +99,9 @@ v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_SUB and remove guard - op.opnum = rop.INT_SUB + op._opnum = rop.INT_SUB self.skip_nextop() self.optimize_INT_SUB(op) else: @@ -114,9 +114,9 @@ v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_MUL and remove guard - op.opnum = rop.INT_MUL + op._opnum = rop.INT_MUL self.skip_nextop() self.optimize_INT_MUL(op) else: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 17 14:29:40 2010 @@ -308,7 +308,7 @@ def propagate_forward(self, op): self.producer[op.result] = op - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -348,7 +348,7 @@ compile.giveup() descr.store_final_boxes(op, newboxes) # - if op.opnum == rop.GUARD_VALUE: + if op.getopnum() == rop.GUARD_VALUE: if self.getvalue(op.getarg(0)) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. 
# This is done after the operation is emitted, to let @@ -377,7 +377,7 @@ args.append(self.values[arg].get_key_box()) else: args.append(arg) - args.append(ConstInt(op.opnum)) + args.append(ConstInt(op.getopnum())) return args def optimize_default(self, op): @@ -385,7 +385,7 @@ is_ovf = op.is_ovf() if is_ovf: nextop = self.loop.operations[self.i + 1] - canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW + canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: @@ -395,7 +395,7 @@ argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.descr) self.make_constant(op.result, resbox.constbox()) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard @@ -405,7 +405,7 @@ args = self.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.descr is op.descr: - assert oldop.opnum == op.opnum + assert oldop.getopnum() == op.getopnum() self.make_equal_to(op.result, self.getvalue(oldop.result)) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Fri Sep 17 14:29:40 2010 @@ -14,7 +14,7 @@ if self.find_rewritable_bool(op, args): return - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -39,7 +39,7 @@ def find_rewritable_bool(self, op, args): try: - oldopnum = opboolinvers[op.opnum] + oldopnum = opboolinvers[op.getopnum()] targs = [args[0], args[1], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -47,7 +47,7 @@ pass try: - oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.descr is op.descr: @@ -57,7 +57,7 @@ pass try: - oldopnum = opboolinvers[opboolreflex[op.opnum]] + oldopnum = opboolinvers[opboolreflex[op.getopnum()]] targs = [args[1], args[0], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -215,7 +215,7 @@ # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. old_guard_op = self.optimizer.newoperations[value.last_guard_index] - if old_guard_op.opnum == rop.GUARD_NONNULL: + if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. 
new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, @@ -253,7 +253,7 @@ return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this - op.opnum = rop.CALL + op._opnum = rop.CALL self.emit_operation(op) resvalue = self.getvalue(op.result) self.optimizer.loop_invariant_results[key] = resvalue Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 17 14:29:40 2010 @@ -446,7 +446,7 @@ descr)) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Fri Sep 17 14:29:40 2010 @@ -1922,7 +1922,7 @@ vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE call_may_force_op = self.history.operations.pop() - assert call_may_force_op.opnum == rop.CALL_MAY_FORCE + assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) self.history.operations.append(call_may_force_op) @@ -2088,7 +2088,7 @@ """ Patch a CALL into a CALL_PURE. """ op = self.history.operations[-1] - assert op.opnum == rop.CALL + assert op.getopnum() == rop.CALL resbox_as_const = resbox.constbox() for i in range(op.numargs()): if not isinstance(op.getarg(i), Const): @@ -2100,7 +2100,7 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - op.opnum = rop.CALL_PURE + op._opnum = rop.CALL_PURE # XXX XXX replace when the resoperation refactoring has been finished op._args = [resbox_as_const] + op._args return resbox @@ -2110,7 +2110,7 @@ patching the CALL_MAY_FORCE that occurred just now. 
""" op = self.history.operations.pop() - assert op.opnum == rop.CALL_MAY_FORCE + assert op.getopnum() == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args arglist = op.getarglist() greenargs = arglist[1:num_green_args+1] @@ -2124,7 +2124,7 @@ # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) - op.opnum = rop.CALL_ASSEMBLER + op._opnum = rop.CALL_ASSEMBLER op.setarglist(args) op.descr = token self.history.operations.append(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Fri Sep 17 14:29:40 2010 @@ -17,19 +17,13 @@ def __init__(self, opnum, args, result, descr=None): make_sure_not_resized(args) assert isinstance(opnum, int) - self.opnum = opnum + self._opnum = opnum self._args = list(args) make_sure_not_resized(self._args) assert not isinstance(result, list) self.result = result self.setdescr(descr) - # XXX: just for debugging during the refactoring, kill me - def __setattr__(self, attr, value): - if attr == 'args': - import pdb;pdb.set_trace() - object.__setattr__(self, attr, value) - def copy_and_change(self, opnum, args=None, result=None, descr=None): "shallow copy: the returned operation is meant to be used in place of self" if args is None: @@ -43,6 +37,9 @@ newop.setfailargs(self.getfailargs()) return newop + def getopnum(self): + return self._opnum + def getarg(self, i): return self._args[i] @@ -81,7 +78,7 @@ descr = self.descr if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.opnum, self._args, self.result, descr) + op = ResOperation(self._opnum, self._args, self.result, descr) op.fail_args = self.fail_args op.name = self.name if not we_are_translated(): @@ -110,44 +107,44 @@ def getopname(self): try: - return opname[self.opnum].lower() + return opname[self._opnum].lower() except KeyError: - return '<%d>' % self.opnum + return '<%d>' % self._opnum def is_guard(self): - return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST + return rop._GUARD_FIRST <= self._opnum <= rop._GUARD_LAST def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST + return rop._GUARD_FOLDABLE_FIRST <= self._opnum <= rop._GUARD_FOLDABLE_LAST def is_guard_exception(self): - return (self.opnum == rop.GUARD_EXCEPTION or - self.opnum == rop.GUARD_NO_EXCEPTION) + return (self._opnum == rop.GUARD_EXCEPTION or + self._opnum == rop.GUARD_NO_EXCEPTION) def is_guard_overflow(self): - return (self.opnum == rop.GUARD_OVERFLOW or - self.opnum == rop.GUARD_NO_OVERFLOW) + return (self._opnum == rop.GUARD_OVERFLOW or + self._opnum == rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST + return rop._ALWAYS_PURE_FIRST <= self._opnum <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST + return rop._NOSIDEEFFECT_FIRST <= self._opnum <= rop._NOSIDEEFFECT_LAST def can_raise(self): - return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST + return rop._CANRAISE_FIRST <= self._opnum <= rop._CANRAISE_LAST def is_ovf(self): - return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST + return 
rop._OVF_FIRST <= self._opnum <= rop._OVF_LAST def is_comparison(self): return self.is_always_pure() and self.returns_bool_result() def is_final(self): - return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST + return rop._FINAL_FIRST <= self._opnum <= rop._FINAL_LAST def returns_bool_result(self): - opnum = self.opnum + opnum = self._opnum if we_are_translated(): assert opnum >= 0 elif opnum < 0: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Fri Sep 17 14:29:40 2010 @@ -11,17 +11,17 @@ from pypy.jit.metainterp.history import AbstractDescr # change ARRAYCOPY to call, so we don't have to pass around # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. - if op.opnum == rop.ARRAYCOPY: + if op.getopnum() == rop.ARRAYCOPY: descr = op.getarg(0) assert isinstance(descr, AbstractDescr) args = op.getarglist()[1:] op = ResOperation(rop.CALL, args, op.result, descr=descr) - elif op.opnum == rop.CALL_PURE: + elif op.getopnum() == rop.CALL_PURE: args = op.getarglist()[1:] op = ResOperation(rop.CALL, args, op.result, op.descr) - elif op.opnum == rop.VIRTUAL_REF: + elif op.getopnum() == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) - elif op.opnum == rop.VIRTUAL_REF_FINISH: + elif op.getopnum() == rop.VIRTUAL_REF_FINISH: return [] return [op] Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py Fri Sep 17 14:29:40 2010 @@ -16,7 +16,7 @@ """ loop = parse(x) assert len(loop.operations) == 3 - assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, + assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 assert loop.operations[-1].descr Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 17 14:29:40 2010 @@ -140,7 +140,7 @@ print '%-39s| %s' % (txt1[:39], txt2[:39]) txt1 = txt1[39:] txt2 = txt2[39:] - assert op1.opnum == op2.opnum + assert op1.getopnum() == op2.getopnum() assert op1.numargs() == op2.numargs() for i in range(op1.numargs()): x = op1.getarg(i) @@ -150,7 +150,7 @@ assert op1.result == remap[op2.result] else: remap[op2.result] = op1.result - if op1.opnum != rop.JUMP: # xxx obscure + if op1.getopnum() != rop.JUMP: # xxx obscure assert op1.descr == op2.descr if op1.fail_args or op2.fail_args: assert len(op1.fail_args) == len(op2.fail_args) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py Fri 
Sep 17 14:29:40 2010 @@ -71,7 +71,7 @@ # ops = self.metainterp.staticdata.stats.loops[0].operations [guard_op] = [op for op in ops - if op.opnum == rop.GUARD_NOT_FORCED] + if op.getopnum() == rop.GUARD_NOT_FORCED] bxs1 = [box for box in guard_op.fail_args if str(box._getrepr_()).endswith('.X')] assert len(bxs1) == 1 Modified: pypy/branch/resoperation-refactoring/pypy/jit/tool/showstats.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/tool/showstats.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/tool/showstats.py Fri Sep 17 14:29:40 2010 @@ -17,7 +17,7 @@ num_dmp = 0 num_guards = 0 for op in loop.operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: num_dmp += 1 else: num_ops += 1 From antocuni at codespeak.net Fri Sep 17 14:50:16 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 14:50:16 +0200 (CEST) Subject: [pypy-svn] r77135 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . optimizeopt Message-ID: <20100917125016.2F919282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 14:50:14 2010 New Revision: 77135 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Log: (david, antocuni): remove all the remaining references to _opnum Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py Fri Sep 17 14:50:14 2010 @@ -86,9 +86,9 @@ if resbound.has_lower and resbound.has_upper and \ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_ADD and remove guard - op._opnum = rop.INT_ADD + op = op.copy_and_change(rop.INT_ADD) self.skip_nextop() - self.optimize_INT_ADD(op) + self.optimize_INT_ADD(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) @@ -101,9 +101,9 @@ if resbound.has_lower and resbound.has_upper and \ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_SUB and remove guard - op._opnum = rop.INT_SUB + op = op.copy_and_change(rop.INT_SUB) self.skip_nextop() - self.optimize_INT_SUB(op) + self.optimize_INT_SUB(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) @@ -116,9 +116,9 @@ if resbound.has_lower and resbound.has_upper and \ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_MUL and remove guard - op._opnum = rop.INT_MUL + op = op.copy_and_change(rop.INT_MUL) self.skip_nextop() - self.optimize_INT_MUL(op) + self.optimize_INT_MUL(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Fri Sep 17 14:50:14 2010 @@ -253,7 +253,7 @@ return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate 
operation for this - op._opnum = rop.CALL + op = op.copy_and_change(rop.CALL) self.emit_operation(op) resvalue = self.getvalue(op.result) self.optimizer.loop_invariant_results[key] = resvalue Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Fri Sep 17 14:50:14 2010 @@ -2100,9 +2100,8 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - op._opnum = rop.CALL_PURE - # XXX XXX replace when the resoperation refactoring has been finished - op._args = [resbox_as_const] + op._args + newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist()) + self.history.operations[-1] = newop return resbox def direct_assembler_call(self, targetjitdriver_sd): @@ -2124,9 +2123,7 @@ # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) - op._opnum = rop.CALL_ASSEMBLER - op.setarglist(args) - op.descr = token + op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ From antocuni at codespeak.net Fri Sep 17 14:54:24 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 14:54:24 +0200 (CEST) Subject: [pypy-svn] r77137 - pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport Message-ID: <20100917125424.B2277282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 14:54:23 2010 New Revision: 77137 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py Log: (david, antocuni): remove the last references to the about-to-be-removed _args field Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/gc.py Fri Sep 17 14:54:23 2010 @@ -581,16 +581,14 @@ if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) - op = ResOperation(rop.SETFIELD_RAW, op._args, None, - descr=op.descr) + op = op.copy_and_change(rop.SETFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) - op = ResOperation(rop.SETARRAYITEM_RAW, op._args, None, - descr=op.descr) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- newops.append(op) del operations[:] From arigo at codespeak.net Fri Sep 17 14:58:03 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 14:58:03 +0200 (CEST) Subject: [pypy-svn] r77138 - in pypy/branch/gen2-gc/pypy: config rlib rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/test translator/c translator/c/test Message-ID: <20100917125803.2258F282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 14:58:01 2010 New Revision: 77138 Modified: 
pypy/branch/gen2-gc/pypy/config/translationoption.py pypy/branch/gen2-gc/pypy/rlib/rstring.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/base.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py pypy/branch/gen2-gc/pypy/rpython/memory/lltypelayout.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py pypy/branch/gen2-gc/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/gen2-gc/pypy/translator/c/funcgen.py pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py Log: Translation. Minor fixes, e.g. in the maximum size of UnicodeBuilder in rstring. Add support for set_max_heap_size(). Modified: pypy/branch/gen2-gc/pypy/config/translationoption.py ============================================================================== --- pypy/branch/gen2-gc/pypy/config/translationoption.py (original) +++ pypy/branch/gen2-gc/pypy/config/translationoption.py Fri Sep 17 14:58:01 2010 @@ -52,7 +52,7 @@ # gc ChoiceOption("gc", "Garbage Collection Strategy", ["boehm", "ref", "marksweep", "semispace", "statistics", - "generation", "hybrid", "markcompact", "none"], + "generation", "hybrid", "markcompact", "minimark", "none"], "ref", requires={ "ref": [("translation.rweakref", False), # XXX ("translation.gctransformer", "ref")], @@ -65,6 +65,7 @@ "hybrid": [("translation.gctransformer", "framework")], "boehm": [("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], + "minimark": [("translation.gctransformer", "framework")], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", Modified: pypy/branch/gen2-gc/pypy/rlib/rstring.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rlib/rstring.py (original) +++ pypy/branch/gen2-gc/pypy/rlib/rstring.py Fri Sep 17 14:58:01 2010 @@ -46,7 +46,9 @@ # -------------- public API --------------------------------- -INIT_SIZE = 100 # XXX tweak +# the following number is the maximum size of an RPython unicode +# string that goes into the nursery of the minimark GC. 
+INIT_SIZE = 56 class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/base.py Fri Sep 17 14:58:01 2010 @@ -5,6 +5,7 @@ from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage +from pypy.rlib.rarithmetic import r_uint TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed), ('size', lltype.Signed), @@ -146,7 +147,7 @@ return False def set_max_heap_size(self, size): - pass + raise NotImplementedError def x_swap_pool(self, newpool): return newpool @@ -340,6 +341,7 @@ "generation": "generation.GenerationGC", "hybrid": "hybrid.HybridGC", "markcompact" : "markcompact.MarkCompactGC", + "minimark" : "minimark.MiniMarkGC", } try: modulename, classname = classes[config.translation.gc].split('.') @@ -351,10 +353,12 @@ GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS -def read_from_env(varname): +def _read_float_and_factor_from_env(varname): import os value = os.environ.get(varname) if value: + if len(value) > 1 and value[-1] in 'bB': + value = value[:-1] realvalue = value[:-1] if value[-1] in 'kK': factor = 1024 @@ -366,7 +370,21 @@ factor = 1 realvalue = value try: - return int(float(realvalue) * factor) + return (float(realvalue), factor) except ValueError: pass - return -1 + return (0.0, 0) + +def read_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return int(value * factor) + +def read_uint_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return r_uint(value * factor) + +def read_float_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + if factor != 1: + return 0.0 + return value Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Fri Sep 17 14:58:01 2010 @@ -2,13 +2,14 @@ from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.gc.base import GCBase, MovingGCBase -from pypy.rpython.memory.gc import minimarkpage +from pypy.rpython.memory.gc import minimarkpage, base, generation from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint from pypy.rlib.debug import ll_assert, debug_print, debug_start, debug_stop from pypy.rlib.objectmodel import we_are_translated WORD = LONG_BIT // 8 +NULL = llmemory.NULL first_gcflag = 1 << (LONG_BIT//2) @@ -39,9 +40,11 @@ # collection. See pypy/doc/discussion/finalizer-order.txt GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4 -# Marker set to 'tid' during a minor collection when an object from -# the nursery was forwarded. -FORWARDED_MARKER = -1 + +FORWARDSTUB = lltype.GcStruct('forwarding_stub', + ('forw', llmemory.Address)) +FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB) + # ____________________________________________________________ @@ -59,14 +62,14 @@ # by GCFLAG_xxx above. 
HDR = lltype.Struct('header', ('tid', lltype.Signed)) typeid_is_in_field = 'tid' - #withhash_flag_is_in_field = 'tid', GCFLAG_HAS_SHADOW + withhash_flag_is_in_field = 'tid', GCFLAG_HAS_SHADOW # ^^^ prebuilt objects may have the flag GCFLAG_HAS_SHADOW; # then they are one word longer, the extra word storing the hash. # During a minor collection, the objects in the nursery that are # moved outside are changed in-place: their header is replaced with - # FORWARDED_MARKER, and the following word is set to the address of + # the value -1, and the following word is set to the address of # where the object was moved. This means that all objects in the # nursery need to be at least 2 words long, but objects outside the # nursery don't need to. @@ -75,10 +78,18 @@ TRANSLATION_PARAMS = { - # The size of the nursery. -1 means "auto", which means that it - # will look it up in the env var PYPY_GENERATIONGC_NURSERY and - # fall back to half the size of the L2 cache. - "nursery_size": -1, + # Automatically adjust the size of the nursery and the + # 'major_collection_threshold' from the environment. For + # 'nursery_size' it will look it up in the env var + # PYPY_GC_NURSERY and fall back to half the size of + # the L2 cache. For 'major_collection_threshold' it will look + # it up in the env var PYPY_GC_MAJOR_COLLECT. It also sets + # 'max_heap_size' to PYPY_GC_MAX. + "read_from_env": True, + + # The size of the nursery. Note that this is only used as a + # fall-back number. + "nursery_size": 896*1024, # The system page size. Like obmalloc.c, we assume that it is 4K, # which is OK for most systems. @@ -90,8 +101,9 @@ # The maximum size of an object allocated compactly. All objects # that are larger are just allocated with raw_malloc(). The value - # chosen here is enough for a unicode string of length 100. - "small_request_threshold": 52*WORD, + # chosen here is enough for a unicode string of length 56 (on 64-bits) + # or 60 (on 32-bits). See rlib.rstring.INIT_SIZE. + "small_request_threshold": 256-WORD, # Full collection threshold: after a major collection, we record # the total size consumed; and after every minor collection, if the @@ -101,6 +113,7 @@ } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, + read_from_env=False, nursery_size=32*WORD, page_size=16*WORD, arena_size=64*WORD, @@ -109,10 +122,17 @@ ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) assert small_request_threshold % WORD == 0 + self.read_from_env = read_from_env self.nursery_size = nursery_size self.small_request_threshold = small_request_threshold self.major_collection_threshold = major_collection_threshold - self.nursery_hash_base = -1 + self.num_major_collects = 0 + self.max_heap_size = 0.0 + self.max_heap_size_already_raised = False + # + self.nursery = NULL + self.nursery_free = NULL + self.nursery_top = NULL # # The ArenaCollection() handles the nonmovable objects allocation. if ArenaCollectionClass is None: @@ -128,11 +148,6 @@ # A list of all prebuilt GC objects that contain pointers to the heap self.prebuilt_root_objects = self.AddressStack() # - # Support for id and identityhash: map nursery objects with - # GCFLAG_HAS_SHADOW to their future location at the next - # minor collection. - self.young_objects_shadows = self.AddressDict() - # self._init_writebarrier_logic() @@ -143,8 +158,6 @@ # we implement differently anyway. So directly call GCBase.setup(). 
GCBase.setup(self) # - assert self.nursery_size > 0, "XXX" - # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) @@ -159,6 +172,49 @@ self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # + # Support for id and identityhash: map nursery objects with + # GCFLAG_HAS_SHADOW to their future location at the next + # minor collection. + self.young_objects_shadows = self.AddressDict() + # + # Allocate a nursery. In case of auto_nursery_size, start by + # allocating a very small nursery, enough to do things like look + # up the env var, which requires the GC; and then really + # allocate the nursery of the final size. + if not self.read_from_env: + self.allocate_nursery() + else: + # + defaultsize = self.nursery_size + minsize = 18 * self.small_request_threshold + self.nursery_size = minsize + self.allocate_nursery() + # + # From there on, the GC is fully initialized and the code + # below can use it + newsize = base.read_from_env('PYPY_GC_NURSERY') + if newsize <= 0: + newsize = generation.estimate_best_nursery_size() + if newsize <= 0: + newsize = defaultsize + # + major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') + if major_coll >= 1.0: + self.major_collection_threshold = major_coll + # + max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') + if max_heap_size > 0: + self.max_heap_size = float(max_heap_size) + # + self.minor_collection() # to empty the nursery + llarena.arena_free(self.nursery) + self.nursery_size = max(newsize, minsize) + self.allocate_nursery() + + + def allocate_nursery(self): + debug_start("gc-set-nursery-size") + debug_print("nursery size:", self.nursery_size) # the start of the nursery: we actually allocate a tiny bit more for # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). @@ -167,12 +223,13 @@ if not self.nursery: raise MemoryError("cannot allocate nursery") # the current position in the nursery: - self.nursery_next = self.nursery + self.nursery_free = self.nursery # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size # initialize the threshold, a bit arbitrarily self.next_major_collection_threshold = ( self.nursery_size * self.major_collection_threshold) + debug_stop("gc-set-nursery-size") def malloc_fixedsize_clear(self, typeid, size, can_collect=True, @@ -205,9 +262,9 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. - result = self.nursery_next - self.nursery_next = result + totalsize - if self.nursery_next > self.nursery_top: + result = self.nursery_free + self.nursery_free = result + totalsize + if self.nursery_free > self.nursery_top: result = self.collect_and_reserve(totalsize) # # Build the object. @@ -256,9 +313,9 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. - result = self.nursery_next - self.nursery_next = result + totalsize - if self.nursery_next > self.nursery_top: + result = self.nursery_free + self.nursery_free = result + totalsize + if self.nursery_free > self.nursery_top: result = self.collect_and_reserve(totalsize) # # Build the object. @@ -277,7 +334,7 @@ self.major_collection() def collect_and_reserve(self, totalsize): - """To call when nursery_next overflows nursery_top. + """To call when nursery_free overflows nursery_top. 
Do a minor collection, and possibly also a major collection, and finally reserve 'totalsize' bytes at the start of the now-empty nursery. @@ -290,19 +347,21 @@ # The nursery might not be empty now, because of # execute_finalizers(). If it is almost full again, # we need to fix it with another call to minor_collection(). - if self.nursery_next + totalsize > self.nursery_top: + if self.nursery_free + totalsize > self.nursery_top: self.minor_collection() # - result = self.nursery_next - self.nursery_next = result + totalsize - ll_assert(self.nursery_next <= self.nursery_top, "nursery overflow") + result = self.nursery_free + self.nursery_free = result + totalsize + ll_assert(self.nursery_free <= self.nursery_top, "nursery overflow") return result collect_and_reserve._dont_inline_ = True - def _full_collect_if_needed(self): - if self.get_total_memory_used() > self.next_major_collection_threshold: - self.collect() + def _full_collect_if_needed(self, reserving_size): + if (float(self.get_total_memory_used()) + reserving_size > + self.next_major_collection_threshold): + self.minor_collection() + self.major_collection(reserving_size) def _reserve_external_memory(self, totalsize): """Do a raw_malloc() to get some external memory. @@ -322,7 +381,7 @@ # # If somebody calls _external_malloc() a lot, we must eventually # force a full collection. - self._full_collect_if_needed() + self._full_collect_if_needed(totalsize) # result = self._reserve_external_memory(totalsize) llmemory.raw_memclear(result, totalsize) @@ -336,7 +395,7 @@ # # If somebody calls _malloc_nonmovable() a lot, we must eventually # force a full collection. - self._full_collect_if_needed() + self._full_collect_if_needed(totalsize) # rawtotalsize = llmemory.raw_malloc_usage(totalsize) if rawtotalsize <= self.small_request_threshold: @@ -368,7 +427,10 @@ # Other functions in the GC API def set_max_heap_size(self, size): - XXX + self.max_heap_size = float(size) + if self.max_heap_size > 0.0: + if self.max_heap_size < self.next_major_collection_threshold: + self.next_major_collection_threshold = self.max_heap_size def can_malloc_nonmovable(self): return True @@ -399,11 +461,13 @@ def malloc_fixedsize_nonmovable(self, typeid): + """NOT_RPYTHON: not tested translated""" size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + self.fixed_size(typeid) # result = self._malloc_nonmovable(typeid, totalsize) - return result + size_gc_header + obj = result + size_gc_header + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_nonmovable(self, typeid, length): size_gc_header = self.gcheaderbuilder.size_gc_header @@ -419,15 +483,15 @@ result = self._malloc_nonmovable(typeid, totalsize) obj = result + size_gc_header (obj + offset_to_length).signed[0] = length - return obj + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_nonmovable(self, typeid, length, zero): # helper for testing, same as GCBase.malloc if self.is_varsize(typeid): - obj = self.malloc_varsize_nonmovable(typeid, length) + gcref = self.malloc_varsize_nonmovable(typeid, length) else: - obj = self.malloc_fixedsize_nonmovable(typeid) - return obj + gcref = self.malloc_fixedsize_nonmovable(typeid) + return gcref # ---------- @@ -441,7 +505,6 @@ return llop.combine_ushort(lltype.Signed, typeid16, flags) def init_gc_object(self, addr, typeid16, flags=0): - #print "init_gc_object(%r, 0x%x)" % (addr, flags) # The default 'flags' is zero. 
The flags GCFLAG_NO_xxx_PTRS # have been chosen to allow 'flags' to be zero in the common # case (hence the 'NO' in their name). @@ -460,13 +523,15 @@ return self.nursery <= addr < self.nursery_top def is_forwarded(self, obj): + """Returns True if the nursery obj is marked as forwarded. + Implemented a bit obscurely by checking an unrelated flag + that can never be set on a young object -- except if tid == -1. + """ assert self.is_in_nursery(obj) - tid = self.header(obj).tid - return isinstance(tid, int) and tid == FORWARDED_MARKER + return self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING def get_forwarding_address(self, obj): - obj = llarena.getfakearenaaddress(obj) - return obj.address[0] + return llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw def get_total_memory_used(self): """Return the total memory used, not counting any object in the @@ -481,6 +546,10 @@ # similarily, all objects should have this flag: ll_assert(self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS, "missing GCFLAG_NO_YOUNG_PTRS") + # if we have GCFLAG_NO_HEAP_PTRS, then we have GCFLAG_NO_YOUNG_PTRS + if self.header(obj).tid & GCFLAG_NO_HEAP_PTRS: + ll_assert(self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS, + "GCFLAG_NO_HEAP_PTRS && !GCFLAG_NO_YOUNG_PTRS") # the GCFLAG_VISITED should not be set between collections ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") @@ -547,17 +616,17 @@ def assume_young_pointers(self, addr_struct): - """Called occasionally by the JIT to mean 'assume that 'addr_struct' - may now contain young pointers. + """Called occasionally by the JIT to mean ``assume that 'addr_struct' + may now contain young pointers.'' """ - XXX objhdr = self.header(addr_struct) if objhdr.tid & GCFLAG_NO_YOUNG_PTRS: self.old_objects_pointing_to_young.append(addr_struct) objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS - if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.last_generation_root_objects.append(addr_struct) + # + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_struct) def writebarrier_before_copy(self, source_addr, dest_addr): """ This has the same effect as calling writebarrier over @@ -618,7 +687,7 @@ # All live nursery objects are out, and the rest dies. Fill # the whole nursery with zero and reset the current nursery pointer. llarena.arena_reset(self.nursery, self.nursery_size, 2) - self.nursery_next = self.nursery + self.nursery_free = self.nursery # debug_print("minor collect, total memory used:", self.get_total_memory_used()) @@ -633,9 +702,9 @@ # then the write_barrier must have ensured that the prebuilt # GcStruct is in the list self.old_objects_pointing_to_young. self.root_walker.walk_roots( - MiniMarkGC._trace_drag_out, # stack roots - MiniMarkGC._trace_drag_out, # static in prebuilt non-gc - None) # static in prebuilt gc + MiniMarkGC._trace_drag_out1, # stack roots + MiniMarkGC._trace_drag_out1, # static in prebuilt non-gc + None) # static in prebuilt gc def collect_oldrefs_to_nursery(self): # Follow the old_objects_pointing_to_young list and move the @@ -660,7 +729,10 @@ self.trace(obj, self._trace_drag_out, None) - def _trace_drag_out(self, root, ignored=None): + def _trace_drag_out1(self, root): + self._trace_drag_out(root, None) + + def _trace_drag_out(self, root, ignored): obj = root.address[0] # # If 'obj' is not in the nursery, nothing to change. @@ -670,7 +742,6 @@ # If 'obj' was already forwarded, change it to its forwarding address. 
if self.is_forwarded(obj): root.address[0] = self.get_forwarding_address(obj) - #print '(already forwarded)' return # # First visit to 'obj': we must move it out of the nursery. @@ -686,10 +757,7 @@ else: # The object has already a shadow. newobj = self.young_objects_shadows.get(obj) - ll_assert(newobj, "GCFLAG_HAS_SHADOW but not shadow found") - #print 'moving object %r into shadow %r' % ( - # llarena.getfakearenaaddress(obj), - # llarena.getfakearenaaddress(newobj),) + ll_assert(newobj != NULL, "GCFLAG_HAS_SHADOW but no shadow found") newhdr = newobj - size_gc_header # # Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get @@ -700,21 +768,19 @@ # nursery are kept unchanged in this step. llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize) # - # Set the old object's tid to FORWARDED_MARKER and replace - # the old object's content with the target address. + # Set the old object's tid to -1 (containing all flags) and + # replace the old object's content with the target address. # A bit of no-ops to convince llarena that we are changing # the layout, in non-translated versions. - llarena.arena_reset(obj - size_gc_header, totalsize, 0) - llarena.arena_reserve(obj - size_gc_header, llmemory.sizeof(self.HDR)) - llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address)) - self.header(obj).tid = FORWARDED_MARKER obj = llarena.getfakearenaaddress(obj) + llarena.arena_reset(obj - size_gc_header, totalsize, 0) + llarena.arena_reserve(obj - size_gc_header, + size_gc_header + llmemory.sizeof(FORWARDSTUB)) + self.header(obj).tid = -1 newobj = newhdr + size_gc_header - obj.address[0] = newobj + llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw = newobj # # Change the original pointer to this object. - #print - #print '\t\t\t->', llarena.getfakearenaaddress(newobj - size_gc_header) root.address[0] = newobj # # Add the newobj to the list 'old_objects_pointing_to_young', @@ -727,7 +793,7 @@ # ---------- # Full collection - def major_collection(self): + def major_collection(self, reserving_size=0): """Do a major collection. Only for when the nursery is empty.""" # debug_start("gc-collect") @@ -740,7 +806,7 @@ self.rawmalloced_total_size, "bytes") # # Debugging checks - ll_assert(self.nursery_next == self.nursery, + ll_assert(self.nursery_free == self.nursery, "nursery not empty in major_collection()") self.debug_check_consistency() # @@ -779,17 +845,45 @@ # self.debug_check_consistency() # - self.next_major_collection_threshold = ( - self.get_total_memory_used() * self.major_collection_threshold) - # + self.num_major_collects += 1 debug_print("| used after collection:") debug_print("| in ArenaCollection: ", self.ac.total_memory_used, "bytes") debug_print("| raw_malloced: ", self.rawmalloced_total_size, "bytes") + debug_print("| number of major collects: ", + self.num_major_collects) debug_print("`----------------------------------------------") debug_stop("gc-collect") # + # Set the threshold for the next major collection to be when we + # have allocated 'major_collection_threshold' times more than + # we currently have. + self.next_major_collection_threshold = ( + (self.get_total_memory_used() * self.major_collection_threshold) + + reserving_size) + # + # Max heap size: gives an upper bound on the threshold. If we + # already have at least this much allocated, raise MemoryError. 
+ if (self.max_heap_size > 0.0 and + self.next_major_collection_threshold > self.max_heap_size): + # + self.next_major_collection_threshold = self.max_heap_size + if (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_threshold): + # + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError + # # At the end, we can execute the finalizers of the objects # listed in 'run_finalizers'. Note that this will typically do # more allocations. @@ -805,7 +899,7 @@ else: return True # dies - def _reset_gcflag_visited(self, obj, ignored=None): + def _reset_gcflag_visited(self, obj, ignored): self.header(obj).tid &= ~GCFLAG_VISITED def free_unvisited_rawmalloc_objects(self): @@ -848,7 +942,10 @@ def _collect_obj(obj, objects_to_trace): objects_to_trace.append(obj) - def _collect_ref(self, root, ignored=None): + def _collect_ref(self, root): + self.objects_to_trace.append(root.address[0]) + + def _collect_ref_rec(self, root, ignored): self.objects_to_trace.append(root.address[0]) def visit_all_objects(self): @@ -877,7 +974,7 @@ # # Trace the content of the object and put all objects it references # into the 'objects_to_trace' list. - self.trace(obj, self._collect_ref, None) + self.trace(obj, self._collect_ref_rec, None) # ---------- @@ -898,7 +995,8 @@ # collection if self.header(obj).tid & GCFLAG_HAS_SHADOW: shadow = self.young_objects_shadows.get(obj) - ll_assert(shadow, "GCFLAG_HAS_SHADOW but not shadow found") + ll_assert(shadow != NULL, + "GCFLAG_HAS_SHADOW but no shadow found") else: size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Fri Sep 17 14:58:01 2010 @@ -68,10 +68,11 @@ self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), length, flavor='raw') self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + self.nblocks_for_size[0] = 0 # unused for i in range(1, length): self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i) # - self.uninitialized_pages = PAGE_NULL + self.uninitialized_pages = NULL self.num_uninitialized_pages = 0 self.free_pages = NULL self.total_memory_used = r_uint(0) @@ -328,7 +329,8 @@ def start_of_page(addr, page_size): """Return the address of the start of the page that contains 'addr'.""" if we_are_translated(): - xxx + offset = llmemory.cast_adr_to_int(addr) % page_size + return addr - offset else: return _start_of_page_untranslated(addr, page_size) Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py Fri Sep 17 14:58:01 2010 @@ -13,8 +13,13 @@ from pypy.rlib.rstring import INIT_SIZE 
from pypy.rpython.lltypesystem.rstr import STR, UNICODE # + size_gc_header = llmemory.raw_malloc_usage( + llmemory.sizeof(llmemory.Address)) + # size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE)) + size1 = size_gc_header + size1 assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] # size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) + size2 = size_gc_header + size2 assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] Modified: pypy/branch/gen2-gc/pypy/rpython/memory/lltypelayout.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/lltypelayout.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/lltypelayout.py Fri Sep 17 14:58:01 2010 @@ -7,7 +7,7 @@ primitive_to_fmt = {lltype.Signed: "l", lltype.Unsigned: "L", lltype.Char: "c", - lltype.UniChar: "H", # maybe + lltype.UniChar: "i", # 4 bytes lltype.Bool: "B", lltype.Float: "d", llmemory.Address: "P", Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_gc.py Fri Sep 17 14:58:01 2010 @@ -26,7 +26,7 @@ class GCTest(object): GC_PARAMS = {} GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False @@ -452,10 +452,10 @@ a = rgc.malloc_nonmovable(TP, 3) if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_malloc_nonmovable_fixsize(self): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -466,12 +466,12 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_shrink_array(self): from pypy.rpython.lltypesystem.rstr import STR @@ -628,7 +628,7 @@ class TestSemiSpaceGC(GCTest, snippet.SemiSpaceGCTests): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True GC_CAN_SHRINK_BIG_ARRAY = True @@ -649,7 +649,7 @@ class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_BIG_ARRAY = False def test_ref_from_rawmalloced_to_regular(self): @@ -720,7 +720,7 @@ from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass GC_CAN_MOVE = False # with this size of heap, stuff gets allocated # in 3rd gen. 
- GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_PARAMS = {'space_size': 48*WORD, 'min_nursery_size': 12*WORD, 'nursery_size': 12*WORD, @@ -769,4 +769,4 @@ class TestMiniMarkGC(TestSemiSpaceGC): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True Modified: pypy/branch/gen2-gc/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/test/test_transformed_gc.py Fri Sep 17 14:58:01 2010 @@ -47,7 +47,7 @@ gcpolicy = None stacklessgc = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True taggedpointers = False def setup_class(cls): @@ -602,8 +602,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 #except Exception, e: # return 2 @@ -611,7 +611,7 @@ def test_malloc_nonmovable(self): run = self.runner("malloc_nonmovable") - assert int(self.GC_CANNOT_MALLOC_NONMOVABLE) == run([]) + assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([]) def define_malloc_nonmovable_fixsize(cls): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -622,8 +622,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -631,7 +631,7 @@ def test_malloc_nonmovable_fixsize(self): run = self.runner("malloc_nonmovable_fixsize") - assert run([]) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE) def define_shrink_array(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -680,7 +680,8 @@ class GenericMovingGCTests(GenericGCTests): GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False + GC_CAN_TEST_ID = False def define_many_ids(cls): class A(object): @@ -710,7 +711,8 @@ return f def test_many_ids(self): - py.test.skip("fails for bad reasons in lltype.py :-(") + if not self.GC_CAN_TEST_ID: + py.test.skip("fails for bad reasons in lltype.py :-(") run = self.runner("many_ids") run([]) @@ -856,7 +858,7 @@ # (and give fixedsize) def define_writebarrier_before_copy(cls): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -1144,10 +1146,6 @@ GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") - class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1379,7 +1377,7 @@ class TestHybridGC(TestGenerationGC): gcname = "hybrid" - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): @@ -1444,6 +1442,21 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("not supported") + +class TestMiniMarkGC(TestHybridGC): + gcname = "minimark" + GC_CAN_TEST_ID = True + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_PARAMS = {'nursery_size': 32*WORD, + 'page_size': 16*WORD, + 'arena_size': 64*WORD, + 'small_request_threshold': 5*WORD, + } + root_stack_depth 
= 200 + # ________________________________________________________________ # tagged pointers Modified: pypy/branch/gen2-gc/pypy/translator/c/funcgen.py ============================================================================== --- pypy/branch/gen2-gc/pypy/translator/c/funcgen.py (original) +++ pypy/branch/gen2-gc/pypy/translator/c/funcgen.py Fri Sep 17 14:58:01 2010 @@ -733,6 +733,8 @@ continue elif T == Signed: format.append('%ld') + elif T == Unsigned: + format.append('%lu') elif T == Float: format.append('%f') elif isinstance(T, Ptr) or T == Address: Modified: pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py Fri Sep 17 14:58:01 2010 @@ -19,10 +19,11 @@ removetypeptr = False taggedpointers = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False _isolated_func = None + c_allfuncs = None @classmethod def _makefunc_str_int(cls, f): @@ -111,6 +112,7 @@ def teardown_class(cls): if hasattr(cls.c_allfuncs, 'close_isolate'): cls.c_allfuncs.close_isolate() + cls.c_allfuncs = None def run(self, name, *args): if not args: @@ -690,8 +692,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -699,7 +701,7 @@ def test_malloc_nonmovable(self): res = self.run('malloc_nonmovable') - assert res == self.GC_CANNOT_MALLOC_NONMOVABLE + assert res == self.GC_CAN_MALLOC_NONMOVABLE def define_resizable_buffer(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -896,7 +898,7 @@ gcpolicy = "semispace" should_be_moving = True GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True # for snippets @@ -1055,7 +1057,7 @@ class TestHybridGC(TestGenerationalGC): gcpolicy = "hybrid" should_be_moving = True - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True def test_gc_set_max_heap_size(self): py.test.skip("not implemented") @@ -1126,6 +1128,15 @@ res = self.run("adding_a_hash") assert res == 0 +class TestMiniMarkGC(TestSemiSpaceGC): + gcpolicy = "minimark" + should_be_moving = True + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_ARRAY = True + + def test_gc_heap_stats(self): + py.test.skip("not implemented") + # ____________________________________________________________________ class TaggedPointersTest(object): From fijal at codespeak.net Fri Sep 17 15:06:27 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Fri, 17 Sep 2010 15:06:27 +0200 (CEST) Subject: [pypy-svn] r77139 - pypy/branch/rsocket-improvements/pypy/interpreter Message-ID: <20100917130627.847F5282B9E@codespeak.net> Author: fijal Date: Fri Sep 17 15:06:25 2010 New Revision: 77139 Modified: pypy/branch/rsocket-improvements/pypy/interpreter/baseobjspace.py Log: This should get rid of the last undeletable assertion Modified: pypy/branch/rsocket-improvements/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/rsocket-improvements/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/rsocket-improvements/pypy/interpreter/baseobjspace.py Fri Sep 17 15:06:25 2010 @@ -71,7 +71,8 @@ space.wrap("__class__ assignment: only for heap types")) def user_setup(self, space, w_subtype): - assert False, "only for interp-level user 
subclasses from typedef.py" + raise NotImplementedError("only for interp-level user subclasses " + "from typedef.py") def getname(self, space, default): try: From fijal at codespeak.net Fri Sep 17 15:13:12 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Fri, 17 Sep 2010 15:13:12 +0200 (CEST) Subject: [pypy-svn] r77140 - pypy/branch/rsocket-improvements/pypy/rpython/numpy Message-ID: <20100917131312.61641282B9E@codespeak.net> Author: fijal Date: Fri Sep 17 15:13:11 2010 New Revision: 77140 Removed: pypy/branch/rsocket-improvements/pypy/rpython/numpy/ Log: Remove this, is not used for years by now From arigo at codespeak.net Fri Sep 17 15:15:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 15:15:52 +0200 (CEST) Subject: [pypy-svn] r77142 - in pypy/trunk/pypy: module/gc module/gc/test rlib rlib/test rpython rpython/memory rpython/memory/gc rpython/memory/gctransform translator/c/test Message-ID: <20100917131552.E6A4F282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 15:15:50 2010 New Revision: 77142 Added: pypy/trunk/pypy/module/gc/referents.py - copied unchanged from r77139, pypy/branch/gc-module/pypy/module/gc/referents.py pypy/trunk/pypy/module/gc/test/test_referents.py - copied unchanged from r77139, pypy/branch/gc-module/pypy/module/gc/test/test_referents.py pypy/trunk/pypy/rpython/memory/gc/inspect.py - copied unchanged from r77139, pypy/branch/gc-module/pypy/rpython/memory/gc/inspect.py Modified: pypy/trunk/pypy/module/gc/__init__.py pypy/trunk/pypy/module/gc/interp_gc.py pypy/trunk/pypy/module/gc/test/test_gc.py pypy/trunk/pypy/rlib/rgc.py pypy/trunk/pypy/rlib/test/test_rgc.py pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/markcompact.py pypy/trunk/pypy/rpython/memory/gctransform/framework.py pypy/trunk/pypy/rpython/memory/gctypelayout.py pypy/trunk/pypy/rpython/rptr.py pypy/trunk/pypy/translator/c/test/test_newgc.py Log: Merge branch/gc-module. Adds some standard gc functions like gc.get_referrers(), and some custom ones too. 
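The bulk of the app-level implementation lives in module/gc/referents.py, which is
copied unchanged from the branch and therefore does not appear in the diff below;
only the names it registers in module/gc/__init__.py are visible here. Based on
those names alone, usage on a pypy-c built with a framework GC might look roughly
like the following sketch (the exact signatures and return values are defined in
referents.py, so this is illustrative only, not the definitive interface):

    import gc, os

    class Node(object):
        def __init__(self, parent):
            self.parent = parent

    root = Node(None)
    child = Node(root)

    # standard helpers, following the CPython gc module
    print gc.get_referents(child)      # objects reachable from 'child'
    print gc.get_referrers(root)       # objects that refer to 'root'
    print len(gc.get_objects())        # all objects the GC can enumerate

    # PyPy-specific helpers work on opaque gc.GcRef wrappers
    roots = gc.get_rpy_roots()         # list of GcRefs (entries may need filtering)
    some_ref = roots[0]
    print gc.get_rpy_memory_usage(some_ref)   # rough size in bytes
    print gc.get_rpy_type_index(some_ref)
    print gc.get_rpy_referents(some_ref)      # GcRefs reachable from it

    # dump a low-level snapshot of the heap to a file descriptor,
    # mirroring the rgc.dump_rpy_heap() pattern used in the tests below
    fd = os.open('heap.dump', os.O_WRONLY | os.O_CREAT, 0666)
    gc.dump_rpy_heap(fd)
    os.close(fd)

The higher-level functions (gc.get_objects, gc.get_referrers, ...) are presumably
built on top of the get_rpy_* primitives that this merge adds to rlib/rgc.py and
to the GC transformer, as shown in the diffs that follow.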
Modified: pypy/trunk/pypy/module/gc/__init__.py ============================================================================== --- pypy/trunk/pypy/module/gc/__init__.py (original) +++ pypy/trunk/pypy/module/gc/__init__.py Fri Sep 17 15:15:50 2010 @@ -10,13 +10,22 @@ 'collect': 'interp_gc.collect', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'estimate_heap_size': 'interp_gc.estimate_heap_size', 'garbage' : 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } def __init__(self, space, w_name): - ts = space.config.translation.type_system - if ts == 'ootype': - del self.interpleveldefs['dump_heap_stats'] + if (not space.config.translating or + space.config.translation.gctransformer == "framework"): + self.interpleveldefs.update({ + 'get_rpy_roots': 'referents.get_rpy_roots', + 'get_rpy_referents': 'referents.get_rpy_referents', + 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', + 'get_rpy_type_index': 'referents.get_rpy_type_index', + 'get_objects': 'referents.get_objects', + 'get_referents': 'referents.get_referents', + 'get_referrers': 'referents.get_referrers', + 'dump_rpy_heap': 'referents.dump_rpy_heap', + 'GcRef': 'referents.W_GcRef', + }) MixedModule.__init__(self, space, w_name) Modified: pypy/trunk/pypy/module/gc/interp_gc.py ============================================================================== --- pypy/trunk/pypy/module/gc/interp_gc.py (original) +++ pypy/trunk/pypy/module/gc/interp_gc.py Fri Sep 17 15:15:50 2010 @@ -24,36 +24,6 @@ # ____________________________________________________________ -import sys -platform = sys.platform - -def estimate_heap_size(space): - # XXX should be done with the help of the GCs - if platform == "linux2": - import os - pid = os.getpid() - try: - fd = os.open("/proc/" + str(pid) + "/status", os.O_RDONLY, 0777) - except OSError: - pass - else: - try: - content = os.read(fd, 1000000) - finally: - os.close(fd) - lines = content.split("\n") - for line in lines: - if line.startswith("VmSize:"): - start = line.find(" ") # try to ignore tabs - assert start > 0 - stop = len(line) - 3 - assert stop > 0 - result = int(line[start:stop].strip(" ")) * 1024 - return space.wrap(result) - raise OperationError(space.w_RuntimeError, - space.wrap("can't estimate the heap size")) -estimate_heap_size.unwrap_spec = [ObjSpace] - def dump_heap_stats(space, filename): tb = rgc._heap_stats() if not tb: Modified: pypy/trunk/pypy/module/gc/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/module/gc/test/test_gc.py (original) +++ pypy/trunk/pypy/module/gc/test/test_gc.py Fri Sep 17 15:15:50 2010 @@ -59,13 +59,6 @@ raises(ValueError, gc.enable_finalizers) runtest(True) - def test_estimate_heap_size(self): - import sys, gc - if sys.platform == "linux2": - assert gc.estimate_heap_size() > 1024 - else: - raises(RuntimeError, gc.estimate_heap_size) - def test_enable(self): import gc assert gc.isenabled() Modified: pypy/trunk/pypy/rlib/rgc.py ============================================================================== --- pypy/trunk/pypy/rlib/rgc.py (original) +++ pypy/trunk/pypy/rlib/rgc.py Fri Sep 17 15:15:50 2010 @@ -1,6 +1,7 @@ -import gc +import gc, types from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rlib.objectmodel import we_are_translated +from pypy.rpython.lltypesystem import lltype, llmemory # ____________________________________________________________ # General GC features @@ -93,7 +94,7 @@ 
def specialize_call(self, hop): from pypy.rpython.error import TyperError - from pypy.rpython.lltypesystem import lltype, llmemory, rtuple + from pypy.rpython.lltypesystem import rtuple from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR @@ -150,7 +151,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() args_v = [] if len(hop.args_s) == 1: @@ -165,7 +165,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype [v_nbytes] = hop.inputargs(lltype.Signed) hop.exception_cannot_occur() return hop.genop('gc_set_max_heap_size', [v_nbytes], @@ -182,7 +181,6 @@ return annmodel.SomeBool() def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() return hop.genop('gc_can_move', hop.args_v, resulttype=hop.r_result) @@ -195,11 +193,9 @@ def compute_result_annotation(self): from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.lltypesystem import lltype return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP hop.exception_is_here() return hop.genop('gc_heap_stats', [], resulttype=hop.r_result) @@ -209,7 +205,6 @@ When running directly, will pretend that gc is always moving (might be configurable in a future) """ - from pypy.rpython.lltypesystem import lltype return lltype.nullptr(TP) class MallocNonMovingEntry(ExtRegistryEntry): @@ -221,7 +216,6 @@ return malloc(s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): - from pypy.rpython.lltypesystem import lltype # XXX assume flavor and zero to be None by now assert hop.args_s[0].is_constant() vlist = [hop.inputarg(lltype.Void, arg=0)] @@ -243,7 +237,6 @@ def ll_arraycopy(source, dest, source_start, dest_start, length): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here # supports non-overlapping copies only @@ -279,7 +272,6 @@ def ll_shrink_array(p, smallerlength): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here if llop.shrink_array(lltype.Bool, p, smallerlength): @@ -313,3 +305,221 @@ func._dont_inline_ = True func._gc_no_collect_ = True return func + +# ____________________________________________________________ + +def get_rpy_roots(): + "NOT_RPYTHON" + # Return the 'roots' from the GC. + # This stub is not usable on top of CPython. + # The gc typically returns a list that ends with a few NULL_GCREFs. 
+ raise NotImplementedError + +def get_rpy_referents(gcref): + "NOT_RPYTHON" + x = gcref._x + if isinstance(x, list): + d = x + elif isinstance(x, dict): + d = x.keys() + x.values() + else: + d = [] + if hasattr(x, '__dict__'): + d = x.__dict__.values() + if hasattr(type(x), '__slots__'): + for slot in type(x).__slots__: + try: + d.append(getattr(x, slot)) + except AttributeError: + pass + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in d if _keep_object(x)] + +def _keep_object(x): + if isinstance(x, type) or type(x) is types.ClassType: + return False # don't keep any type + if isinstance(x, (list, dict, str)): + return True # keep lists and dicts and strings + try: + return not x._freeze_() # don't keep any frozen object + except AttributeError: + return type(x).__module__ != '__builtin__' # keep non-builtins + except Exception: + return False # don't keep objects whose _freeze_() method explodes + +def get_rpy_memory_usage(gcref): + "NOT_RPYTHON" + # approximate implementation using CPython's type info + Class = type(gcref._x) + size = Class.__basicsize__ + if Class.__itemsize__ > 0: + size += Class.__itemsize__ * len(gcref._x) + return size + +def get_rpy_type_index(gcref): + "NOT_RPYTHON" + from pypy.rlib.rarithmetic import intmask + Class = gcref._x.__class__ + return intmask(id(Class)) + +def cast_gcref_to_int(gcref): + if we_are_translated(): + return lltype.cast_ptr_to_int(gcref) + else: + return id(gcref._x) + +def dump_rpy_heap(fd): + "NOT_RPYTHON" + raise NotImplementedError + +NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) + +class _GcRef(object): + # implementation-specific: there should not be any after translation + __slots__ = ['_x'] + def __init__(self, x): + self._x = x + def __hash__(self): + return object.__hash__(self._x) + def __eq__(self, other): + if isinstance(other, lltype._ptr): + assert other == NULL_GCREF, ( + "comparing a _GcRef with a non-NULL lltype ptr") + return False + assert isinstance(other, _GcRef) + return self._x is other._x + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): + return "_GcRef(%r)" % (self._x, ) + def _freeze_(self): + raise Exception("instances of rlib.rgc._GcRef cannot be translated") + +def cast_instance_to_gcref(x): + # Before translation, casts an RPython instance into a _GcRef. + # After translation, it is a variant of cast_object_to_ptr(GCREF). + if we_are_translated(): + from pypy.rpython import annlowlevel + x = annlowlevel.cast_instance_to_base_ptr(x) + return lltype.cast_opaque_ptr(llmemory.GCREF, x) + else: + return _GcRef(x) +cast_instance_to_gcref._annspecialcase_ = 'specialize:argtype(0)' + +def try_cast_gcref_to_instance(Class, gcref): + # Before translation, unwraps the RPython instance contained in a _GcRef. + # After translation, it is a type-check performed by the GC. + if we_are_translated(): + from pypy.rpython.annlowlevel import base_ptr_lltype + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.lltypesystem import rclass + if _is_rpy_instance(gcref): + objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref) + if objptr.typeptr: # may be NULL, e.g. 
in rdict's dummykeyobj + clsptr = _get_llcls_from_cls(Class) + if rclass.ll_isinstance(objptr, clsptr): + return cast_base_ptr_to_instance(Class, objptr) + return None + else: + if isinstance(gcref._x, Class): + return gcref._x + return None +try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' + +# ------------------- implementation ------------------- + +_cache_s_list_of_gcrefs = None + +def s_list_of_gcrefs(): + global _cache_s_list_of_gcrefs + if _cache_s_list_of_gcrefs is None: + from pypy.annotation import model as annmodel + from pypy.annotation.listdef import ListDef + s_gcref = annmodel.SomePtr(llmemory.GCREF) + _cache_s_list_of_gcrefs = annmodel.SomeList( + ListDef(None, s_gcref, mutated=True, resized=False)) + return _cache_s_list_of_gcrefs + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_roots + def compute_result_annotation(self): + return s_list_of_gcrefs() + def specialize_call(self, hop): + return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + return s_list_of_gcrefs() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_referents', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_memory_usage + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_memory_usage', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_type_index + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_type_index', vlist, + resulttype = hop.r_result) + +def _is_rpy_instance(gcref): + "NOT_RPYTHON" + raise NotImplementedError + +def _get_llcls_from_cls(Class): + "NOT_RPYTHON" + raise NotImplementedError + +class Entry(ExtRegistryEntry): + _about_ = _is_rpy_instance + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeBool() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_is_rpy_instance', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = _get_llcls_from_cls + def compute_result_annotation(self, s_Class): + from pypy.annotation import model as annmodel + from pypy.rpython.lltypesystem import rclass + assert s_Class.is_constant() + return annmodel.SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): + from pypy.rpython.rclass import getclassrepr + from pypy.objspace.flow.model import Constant + from pypy.rpython.lltypesystem import rclass + Class = hop.args_s[0].const + classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class) + classrepr = getclassrepr(hop.rtyper, classdef) + vtable = classrepr.getvtable() + assert lltype.typeOf(vtable) == rclass.CLASSTYPE + return Constant(vtable, concretetype=rclass.CLASSTYPE) + +class Entry(ExtRegistryEntry): + _about_ = dump_rpy_heap + def compute_result_annotation(self, s_fd): + from pypy.annotation.model import s_None + return s_None + def specialize_call(self, hop): + vlist = 
hop.inputargs(lltype.Signed) + hop.exception_is_here() + return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result) Modified: pypy/trunk/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/trunk/pypy/rlib/test/test_rgc.py (original) +++ pypy/trunk/pypy/rlib/test/test_rgc.py Fri Sep 17 15:15:50 2010 @@ -153,3 +153,29 @@ assert len(s2.vars) == 3 for i in range(3): assert s2.vars[i] == 50 + i + + +def test_get_objects(): + class X(object): + pass + x1 = X() + lst = rgc._get_objects() + assert rgc.cast_instance_to_gcref(x1) in lst + +def test_get_referents(): + class X(object): + __slots__ = ['stuff'] + x1 = X() + x1.stuff = X() + x2 = X() + lst = rgc._get_referents(rgc.cast_instance_to_gcref(x1)) + lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] + assert x1.stuff in lst2 + assert x2 not in lst2 + +def test_get_memory_usage(): + class X(object): + pass + x1 = X() + n = rgc._get_memory_usage(rgc.cast_instance_to_gcref(x1)) + assert n >= 8 and n <= 64 Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Fri Sep 17 15:15:50 2010 @@ -53,7 +53,8 @@ varsize_offset_to_length, varsize_offsets_to_gcpointers_in_var_part, weakpointer_offset, - member_index): + member_index, + is_rpython_class): self.getfinalizer = getfinalizer self.is_varsize = is_varsize self.has_gcptr_in_varsize = has_gcptr_in_varsize @@ -66,6 +67,7 @@ self.varsize_offsets_to_gcpointers_in_var_part = varsize_offsets_to_gcpointers_in_var_part self.weakpointer_offset = weakpointer_offset self.member_index = member_index + self.is_rpython_class = is_rpython_class def get_member_index(self, type_id): return self.member_index(type_id) @@ -101,6 +103,9 @@ def get_size(self, obj): return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def get_size_incl_hash(self, obj): + return self.get_size(obj) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. Modified: pypy/trunk/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/markcompact.py Fri Sep 17 15:15:50 2010 @@ -674,6 +674,13 @@ return llmemory.cast_adr_to_int(obj) # not in an arena... 
return adr - self.space + def get_size_incl_hash(self, obj): + size = self.get_size(obj) + hdr = self.header(obj) + if hdr.tid & GCFLAG_HASHFIELD: + size += llmemory.sizeof(lltype.Signed) + return size + # ____________________________________________________________ class CannotAllocateGCArena(Exception): Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Fri Sep 17 15:15:50 2010 @@ -7,7 +7,7 @@ from pypy.rpython.memory.gc import marksweep from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib import rstack +from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc @@ -139,6 +139,8 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP + from pypy.rpython.memory.gc import inspect + super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): # for tests: the GC choice can be specified as class attributes @@ -388,6 +390,31 @@ else: self.id_ptr = None + self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + [s_gc], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + [s_gc, s_gcref], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + [s_gc, s_gcref], + annmodel.SomeBool(), + minimal_transform=False) + self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + [s_gc, annmodel.SomeInteger()], + annmodel.s_None, + minimal_transform=False) + self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, annmodel.SomeInteger(nonneg=True)], @@ -883,6 +910,53 @@ def gct_gc_get_type_info_group(self, hop): return hop.cast_result(self.c_type_info_group) + def gct_gc_get_rpy_roots(self, hop): + livevars = self.push_roots(hop) + hop.genop("direct_call", + [self.get_rpy_roots_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_referents(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_referents_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_memory_usage(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_type_index(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_is_rpy_instance(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + 
[self.is_rpy_instance_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_dump_rpy_heap(self, hop): + livevars = self.push_roots(hop) + [v_fd] = hop.spaceop.args + hop.genop("direct_call", + [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): Modified: pypy/trunk/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/trunk/pypy/rpython/memory/gctypelayout.py Fri Sep 17 15:15:50 2010 @@ -101,6 +101,10 @@ infobits = self.get(typeid).infobits return infobits & T_MEMBER_INDEX + def q_is_rpython_class(self, typeid): + infobits = self.get(typeid).infobits + return infobits & T_IS_RPYTHON_INSTANCE != 0 + def set_query_functions(self, gc): gc.set_query_functions( self.q_is_varsize, @@ -114,7 +118,8 @@ self.q_varsize_offset_to_length, self.q_varsize_offsets_to_gcpointers_in_var_part, self.q_weakpointer_offset, - self.q_member_index) + self.q_member_index, + self.q_is_rpython_class) # the lowest 16bits are used to store group member index @@ -123,6 +128,7 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT T_KEY_MASK = intmask(0xFF000000) T_KEY_VALUE = intmask(0x7A000000) # bug detection only @@ -181,6 +187,8 @@ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF + if is_subclass_of_object(TYPE): + infobits |= T_IS_RPYTHON_INSTANCE info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -259,9 +267,7 @@ else: # no vtable from lltype2vtable -- double-check to be sure # that it's not a subclass of OBJECT. 
- while isinstance(TYPE, lltype.GcStruct): - assert TYPE is not rclass.OBJECT - _, TYPE = TYPE._first_struct() + assert not is_subclass_of_object(TYPE) def get_info(self, type_id): res = llop.get_group_member(GCData.TYPE_INFO_PTR, @@ -437,6 +443,13 @@ for i in range(p._obj.getlength()): zero_gc_pointers_inside(p[i], ITEM) +def is_subclass_of_object(TYPE): + while isinstance(TYPE, lltype.GcStruct): + if TYPE is rclass.OBJECT: + return True + _, TYPE = TYPE._first_struct() + return False + ########## weakrefs ########## # framework: weakref objects are small structures containing only an address Modified: pypy/trunk/pypy/rpython/rptr.py ============================================================================== --- pypy/trunk/pypy/rpython/rptr.py (original) +++ pypy/trunk/pypy/rpython/rptr.py Fri Sep 17 15:15:50 2010 @@ -35,6 +35,9 @@ id = lltype.cast_ptr_to_int(p) return ll_str.ll_int2hex(r_uint(id), True) + def get_ll_eq_function(self): + return None + def rtype_getattr(self, hop): attr = hop.args_s[1].const if isinstance(hop.s_result, annmodel.SomeLLADTMeth): Modified: pypy/trunk/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/trunk/pypy/translator/c/test/test_newgc.py (original) +++ pypy/trunk/pypy/translator/c/test/test_newgc.py Fri Sep 17 15:15:50 2010 @@ -2,7 +2,7 @@ import sys, os, inspect from pypy.objspace.flow.model import summary -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.test import snippet from pypy.rlib import rgc @@ -23,6 +23,7 @@ GC_CAN_SHRINK_ARRAY = False _isolated_func = None + c_allfuncs = None @classmethod def _makefunc_str_int(cls, f): @@ -891,6 +892,202 @@ def test_arraycopy_writebarrier_ptr(self): self.run("arraycopy_writebarrier_ptr") + def define_get_rpy_roots(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def g(s): + lst = rgc.get_rpy_roots() + found = False + for x in lst: + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s): + found = True + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s.u): + os.write(2, "s.u should not be found!\n") + assert False + return found == 1 + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + found = g(s) + if not found: + os.write(2, "not found!\n") + assert False + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_roots(self): + self.run("get_rpy_roots") + + def define_get_rpy_referents(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + lst = rgc.get_rpy_referents(gcref1) + assert gcref2 in lst + assert gcref1 not in lst + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_referents(self): + self.run("get_rpy_referents") + + def define_is_rpy_instance(self): + class Foo: + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def check(gcref, expected): + result = rgc._is_rpy_instance(gcref) + assert result == expected + + def fn(): + s = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + check(gcref1, False) + + f = Foo() + gcref3 = rgc.cast_instance_to_gcref(f) + check(gcref3, True) + + return 0 + + return fn + + def test_is_rpy_instance(self): + self.run("is_rpy_instance") + + def 
define_try_cast_gcref_to_instance(self): + class Foo: + pass + class FooBar(Foo): + pass + class Biz(object): + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def fn(): + foo = Foo() + gcref1 = rgc.cast_instance_to_gcref(foo) + assert rgc.try_cast_gcref_to_instance(Foo, gcref1) is foo + assert rgc.try_cast_gcref_to_instance(FooBar, gcref1) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref1) is None + + foobar = FooBar() + gcref2 = rgc.cast_instance_to_gcref(foobar) + assert rgc.try_cast_gcref_to_instance(Foo, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(FooBar, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(Biz, gcref2) is None + + s = lltype.malloc(S) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + assert rgc.try_cast_gcref_to_instance(Foo, gcref3) is None + assert rgc.try_cast_gcref_to_instance(FooBar, gcref3) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref3) is None + + return 0 + + return fn + + def test_try_cast_gcref_to_instance(self): + self.run("try_cast_gcref_to_instance") + + def define_get_rpy_memory_usage(self): + U = lltype.GcStruct('U', ('x1', lltype.Signed), + ('x2', lltype.Signed), + ('x3', lltype.Signed), + ('x4', lltype.Signed), + ('x5', lltype.Signed), + ('x6', lltype.Signed), + ('x7', lltype.Signed), + ('x8', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_memory_usage(gcref1) + assert 8 <= int1 <= 32 + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_memory_usage(gcref2) + assert 4*9 <= int2 <= 8*12 + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_memory_usage(gcref3) + assert 4*1001 <= int3 <= 8*1010 + return 0 + + return fn + + def test_get_rpy_memory_usage(self): + self.run("get_rpy_memory_usage") + + def define_get_rpy_type_index(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_type_index(gcref1) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_type_index(gcref2) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_type_index(gcref3) + gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + int4 = rgc.get_rpy_type_index(gcref4) + assert int1 != int2 + assert int1 != int3 + assert int2 != int3 + assert int1 == int4 + return 0 + + return fn + + def test_get_rpy_type_index(self): + self.run("get_rpy_type_index") + + filename_dump = str(udir.join('test_dump_rpy_heap')) + def define_dump_rpy_heap(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + filename = self.filename_dump + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + # + fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666) + rgc.dump_rpy_heap(fd) + os.close(fd) + return 0 + + return fn + + def test_dump_rpy_heap(self): + self.run("dump_rpy_heap") + assert os.path.exists(self.filename_dump) + assert os.path.getsize(self.filename_dump) > 0 # minimal test + class TestSemiSpaceGC(TestUsingFramework, 
snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" From arigo at codespeak.net Fri Sep 17 15:16:18 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 15:16:18 +0200 (CEST) Subject: [pypy-svn] r77143 - pypy/branch/gc-module Message-ID: <20100917131618.5240D282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 15:16:16 2010 New Revision: 77143 Removed: pypy/branch/gc-module/ Log: Remove merged branch. From arigo at codespeak.net Fri Sep 17 15:16:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 15:16:52 +0200 (CEST) Subject: [pypy-svn] r77144 - pypy/branch/gen2-gc/pypy/translator/c/test Message-ID: <20100917131652.BDF4A282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 15:16:51 2010 New Revision: 77144 Modified: pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py Log: Add a passing test about removetypeptr with the MiniMarkGC. Modified: pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/gen2-gc/pypy/translator/c/test/test_newgc.py Fri Sep 17 15:16:51 2010 @@ -1191,3 +1191,6 @@ class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): removetypeptr = True + +class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC): + removetypeptr = True From antocuni at codespeak.net Fri Sep 17 15:34:07 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 15:34:07 +0200 (CEST) Subject: [pypy-svn] r77145 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/cli backend/llgraph backend/llsupport/test backend/llvm backend/test backend/x86 backend/x86/test metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100917133407.4E2EC282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 15:34:04 2010 New Revision: 77145 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/runner.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_recompilation.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_recursive.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py Log: (david, antocuni): use the official API for getting and setting descrs Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py Fri Sep 17 15:34:04 2010 @@ -209,7 +209,7 @@ for op in operations: if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): box = op.args[0] - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) box2classes.setdefault(box, []).append(descr.selfclass) if op in self.cliloop.guard2ops: @@ -544,7 +544,7 @@ self.emit_guard_overflow_impl(op, OpCodes.Brfalse) def emit_op_jump(self, op): - target_token = op.descr + target_token = op.getdescr() assert isinstance(target_token, LoopToken) if target_token.cliloop is self.cliloop: # jump to the beginning of the loop @@ -586,7 +586,7 @@ self.store_result(op) def emit_op_instanceof(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_clitype() op.args[0].load(self) @@ -604,7 +604,7 @@ self.store_result(op) def emit_op_call_impl(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.StaticMethDescr) delegate_type = descr.get_delegate_clitype() meth_invoke = descr.get_meth_info() @@ -619,7 +619,7 @@ emit_op_call_pure = emit_op_call def emit_op_oosend(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.MethDescr) clitype = descr.get_self_clitype() methinfo = descr.get_meth_info() @@ -639,7 +639,7 @@ self.store_result(op) def emit_op_getfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -653,7 +653,7 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_setfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -665,7 +665,7 @@ self.il.Emit(OpCodes.Stfld, fieldinfo) def emit_op_getarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -678,7 +678,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_setarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -689,7 +689,7 @@ self.il.Emit(OpCodes.Stelem, itemtype) def emit_op_arraylen_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() op.args[0].load(self) @@ -698,7 +698,7 @@ self.store_result(op) def emit_op_new_array(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, 
runner.TypeDescr) item_clitype = descr.get_clitype() if item_clitype is None: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/runner.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/runner.py Fri Sep 17 15:34:04 2010 @@ -105,7 +105,7 @@ def _attach_token_to_faildescrs(self, token, operations): for op in operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) descr._loop_token = token descr._guard_op = op @@ -136,7 +136,7 @@ func = cliloop.funcbox.holder.GetFunc() func(self.get_inputargs()) op = self.failing_ops[self.inputargs.get_failed_op()] - return op.descr + return op.getdescr() def set_future_value_int(self, index, intvalue): self.get_inputargs().set_int(index, intvalue) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py Fri Sep 17 15:34:04 2010 @@ -152,14 +152,14 @@ def _compile_operations(self, c, operations, var2index): for op in operations: llimpl.compile_add(c, op.getopnum()) - descr = op.descr + descr = op.getdescr() if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython - c._obj.externalobj.operations[-1].descr = descr + c._obj.externalobj.operations[-1].setdescr(descr) for i in range(op.numargs()): x = op.getarg(i) if isinstance(x, history.Box): @@ -174,7 +174,7 @@ raise Exception("'%s' args contain: %r" % (op.getopname(), x)) if op.is_guard(): - faildescr = op.descr + faildescr = op.getdescr() assert isinstance(faildescr, history.AbstractFailDescr) faildescr._fail_args_types = [] for box in op.fail_args: @@ -205,12 +205,12 @@ op = operations[-1] assert op.is_final() if op.getopnum() == rop.JUMP: - targettoken = op.descr + targettoken = op.getdescr() assert isinstance(targettoken, history.LoopToken) compiled_version = targettoken._llgraph_compiled_version llimpl.compile_add_jump_target(c, compiled_version) elif op.getopnum() == rop.FINISH: - faildescr = op.descr + faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) llimpl.compile_add_fail(c, index) else: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py Fri Sep 17 15:34:04 2010 @@ -262,7 +262,7 @@ assert newops[0].getarg(0) == v_base assert newops[0].getarg(1) == v_value assert newops[0].result is None - wbdescr = newops[0].descr + wbdescr = newops[0].getdescr() assert isinstance(wbdescr.jit_wb_if_flag, int) assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) @@ -300,7 +300,7 @@ assert len(operations) == 2 assert operations[0].getopnum() == rop.GETFIELD_RAW assert operations[0].getarg(0) == ConstInt(43) - assert 
operations[0].descr == gc_ll_descr.single_gcref_descr + assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) assert operations[1].getopnum() == rop.PTR_EQ @@ -366,7 +366,7 @@ assert operations[1].getopnum() == rop.SETFIELD_RAW assert operations[1].getarg(0) == v_base assert operations[1].getarg(1) == v_value - assert operations[1].descr == field_descr + assert operations[1].getdescr() == field_descr def test_rewrite_assembler_3(self): # check write barriers before SETARRAYITEM_GC @@ -391,4 +391,4 @@ assert operations[1].getarg(0) == v_base assert operations[1].getarg(1) == v_index assert operations[1].getarg(2) == v_value - assert operations[1].descr == array_descr + assert operations[1].getdescr() == array_descr Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llvm/compile.py Fri Sep 17 15:34:04 2010 @@ -475,7 +475,7 @@ return location def generate_GETFIELD_GC(self, op): - loc = self._generate_field_gep(op.args[0], op.descr) + loc = self._generate_field_gep(op.args[0], op.getdescr()) self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") generate_GETFIELD_GC_PURE = generate_GETFIELD_GC @@ -483,7 +483,7 @@ generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC def generate_SETFIELD_GC(self, op): - fielddescr = op.descr + fielddescr = op.getdescr() loc = self._generate_field_gep(op.args[0], fielddescr) assert isinstance(fielddescr, FieldDescr) getarg = self.cpu.getarg_by_index[fielddescr.size_index] @@ -491,7 +491,7 @@ llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "") def generate_CALL(self, op): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr) v = op.args[0] @@ -579,7 +579,7 @@ self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") def generate_ARRAYLEN_GC(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_len(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_length) @@ -598,7 +598,7 @@ return location def _generate_array_gep(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) location = self._generate_gep(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_array) @@ -612,7 +612,7 @@ def generate_SETARRAYITEM_GC(self, op): loc = self._generate_array_gep(op) - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index] value_ref = getarg(self, op.args[2]) @@ -660,7 +660,7 @@ return res def generate_NEW(self, op): - sizedescr = op.descr + sizedescr = op.getdescr() assert isinstance(sizedescr, SizeDescr) res = self._generate_new(self.cpu._make_const_int(sizedescr.size)) self.vars[op.result] = res @@ -695,7 +695,7 @@ self.vars[op.result] = res def generate_NEW_ARRAY(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_new_array(op, arraydescr.ty_array_ptr, self.cpu._make_const_int(arraydescr.itemsize), Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py 
============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py Fri Sep 17 15:34:04 2010 @@ -1,5 +1,6 @@ import py, sys, random, os, struct, operator from pypy.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, LoopToken, @@ -39,7 +40,7 @@ else: raise NotImplementedError(box) res = self.cpu.execute_token(looptoken) - if res is operations[-1].descr: + if res is operations[-1].getdescr(): self.guard_failed = False else: self.guard_failed = True @@ -77,7 +78,7 @@ operations[0].fail_args = [] if not descr: descr = BasicFailDescr(1) - operations[0].descr = descr + operations[0].setdescr(descr) inputargs = [] for box in valueboxes: if isinstance(box, Box) and box not in inputargs: @@ -910,7 +911,7 @@ ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), ] operations[2].fail_args = inputargs[:] - operations[2].descr = faildescr + operations[2].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1412,7 +1413,7 @@ FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr: + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 @@ -1824,7 +1825,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py Fri Sep 17 15:34:04 2010 @@ -105,11 +105,11 @@ args.append('ConstInt(%d)' % v.value) else: raise NotImplementedError(v) - if op.descr is None: + if op.getdescr() is None: descrstr = '' else: try: - descrstr = ', ' + op.descr._random_info + descrstr = ', ' + op.getdescr()._random_info except AttributeError: descrstr = ', descr=...' 
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % ( @@ -284,7 +284,7 @@ builder.intvars[:] = original_intvars else: op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - op.descr = BasicFailDescr() + op.setdescr(BasicFailDescr()) op.fail_args = fail_subset builder.loop.operations.append(op) @@ -345,7 +345,7 @@ def produce_into(self, builder, r): op, passing = self.gen_guard(builder, r) builder.loop.operations.append(op) - op.descr = BasicFailDescr() + op.setdescr(BasicFailDescr()) op.fail_args = builder.subset_of_intvars(r) if not passing: builder.should_fail_by = op @@ -606,7 +606,7 @@ else: raise NotImplementedError(box) fail = cpu.execute_token(self.loop.token) - assert fail is self.should_fail_by.descr + assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): value = cpu.get_latest_value_float(i) @@ -633,7 +633,7 @@ else: op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box], BoxPtr()) - op.descr = BasicFailDescr() + op.setdescr(BasicFailDescr()) op.fail_args = [] return op @@ -642,7 +642,7 @@ r = self.r guard_op = self.guard_op fail_args = guard_op.fail_args - fail_descr = guard_op.descr + fail_descr = guard_op.getdescr() op = self.should_fail_by if not op.fail_args: return False Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py Fri Sep 17 15:34:04 2010 @@ -688,7 +688,7 @@ def regalloc_perform_with_guard(self, op, guard_op, faillocs, arglocs, resloc, current_depths): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths failargs = guard_op.fail_args @@ -1656,7 +1656,7 @@ def genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) self.genop_call(op, arglocs, result_loc) @@ -1665,10 +1665,10 @@ def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # @@ -1753,7 +1753,7 @@ def genop_discard_cond_call_gc_wb(self, op, arglocs): # use 'mc._mc' directly instead of 'mc', to avoid # bad surprizes if the code buffer is mostly full - descr = op.descr + descr = op.getdescr() if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Fri Sep 17 15:34:04 2010 @@ -268,7 +268,7 @@ selected_reg, need_lower_byte) def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.descr is not looptoken: + if jump.getopnum() != rop.JUMP or 
jump.getdescr() is not looptoken: loop_consts = {} else: loop_consts = {} @@ -451,7 +451,7 @@ def consider_finish(self, op): locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())] - fail_index = self.assembler.cpu.get_fail_descr_number(op.descr) + fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr()) self.assembler.generate_failure(fail_index, locs, self.exc, locs_are_ref) self.possibly_free_vars_for_op(op) @@ -663,7 +663,7 @@ self.Perform(op, arglocs, resloc) def _consider_call(self, op, guard_not_forced_op=None): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, BaseCallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 size = calldescr.get_result_size(self.translate_support_code) @@ -678,7 +678,7 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None @@ -739,10 +739,10 @@ def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.descr): - self._fastpath_malloc(op, op.descr) + if gc_ll_descr.can_inline_malloc(op.getdescr()): + self._fastpath_malloc(op, op.getdescr()) else: - args = gc_ll_descr.args_for_new(op.descr) + args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] return self._call(op, arglocs) @@ -813,13 +813,13 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr) + args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) arglocs = [imm(x) for x in args] arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) # boehm GC (XXX kill the following code at some point) scale_of_field, basesize, ofs_length, _ = ( - self._unpack_arraydescr(op.descr)) + self._unpack_arraydescr(op.getdescr())) return self._malloc_varsize(basesize, ofs_length, scale_of_field, op.getarg(0), op.result) @@ -843,7 +843,7 @@ return imm(ofs), imm(size), ptr def consider_setfield_gc(self, op): - ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr) + ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -870,7 +870,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) + scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: @@ -887,7 +887,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -899,7 +899,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - scale, ofs, _, _ = self._unpack_arraydescr(op.descr) + scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -939,7 +939,7 @@ consider_unicodelen = consider_strlen def 
consider_arraylen_gc(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) args = op.getarglist() @@ -961,7 +961,7 @@ def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) self.jump_target_descr = descr nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_recompilation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_recompilation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_recompilation.py Fri Sep 17 15:34:04 2010 @@ -47,7 +47,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].descr + descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? @@ -114,8 +114,8 @@ assert loop.token._x86_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? if IS_X86_32: - assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.descr._x86_bridge_param_depth == 0 + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py Fri Sep 17 15:34:04 2010 @@ -160,7 +160,7 @@ bridge = self.parse(ops, **kwds) assert ([box.type for box in bridge.inputargs] == [box.type for box in guard_op.fail_args]) - faildescr = guard_op.descr + faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations) return bridge @@ -607,7 +607,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) @@ -630,7 +630,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Fri Sep 17 15:34:04 2010 @@ -65,7 +65,7 @@ jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token - loop.operations[-1].descr 
= loop_token # patch the target of the JUMP + loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) @@ -540,7 +540,7 @@ op = new_loop.operations[-1] if not isinstance(target_loop_token, TerminatingLoopToken): # normal case - op.descr = target_loop_token # patch the jump target + op.setdescr(target_loop_token) # patch the jump target else: # The target_loop_token is a pseudo loop token, # e.g. loop_tokens_done_with_this_frame_void[0] Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/graphpage.py Fri Sep 17 15:34:04 2010 @@ -17,13 +17,13 @@ for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): - graphs.append((SubGraph(op.descr._debug_suboperations), + graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) graphpage = ResOpGraphPage(graphs, errmsg) graphpage.display() def is_interesting_guard(op): - return hasattr(op.descr, '_debug_suboperations') + return hasattr(op.getdescr(), '_debug_suboperations') class ResOpGraphPage(GraphPage): @@ -155,7 +155,7 @@ op = operations[opindex] lines.append(repr(op)) if is_interesting_guard(op): - tgt = op.descr._debug_suboperations[0] + tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] self.genedge((graphindex, opstartindex), (tgt_g, tgt_i), @@ -168,7 +168,7 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt = op.descr + tgt = op.getdescr() tgt_g = -1 if tgt is None: tgt_g = graphindex Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Fri Sep 17 15:34:04 2010 @@ -769,9 +769,9 @@ if isinstance(box, Box): assert box in seen if op.is_guard(): - assert op.descr is not None - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + assert op.getdescr() is not None + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations TreeLoop.check_consistency_of_branch(ops, seen.copy()) for box in op.fail_args or []: if box is not None: @@ -786,7 +786,7 @@ seen[box] = True assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: - target = operations[-1].descr + target = operations[-1].getdescr() if target is not None: assert isinstance(target, LoopToken) @@ -816,9 +816,9 @@ return result.extend(operations) for op in operations: - if op.is_guard() and op.descr: - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + if op.is_guard() and op.getdescr(): + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations _list_all_operations(result, ops, omit_finish) # ____________________________________________________________ Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py (original) +++ 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py Fri Sep 17 15:34:04 2010 @@ -89,8 +89,8 @@ else: res = "" is_guard = op.is_guard() - if op.descr is not None: - descr = op.descr + if op.getdescr() is not None: + descr = op.getdescr() if is_guard and self.guard_number: index = self.metainterp_sd.cpu.get_fail_descr_number(descr) r = "" % index Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimize.py Fri Sep 17 15:34:04 2010 @@ -43,7 +43,7 @@ finder.find_nodes_bridge(bridge) for old_loop_token in old_loop_tokens: if finder.bridge_matches(old_loop_token.specnodes): - bridge.operations[-1].descr = old_loop_token # patch jump target + bridge.operations[-1].setdescr(old_loop_token) # patch jump target optimize_bridge_1(metainterp_sd, bridge) return old_loop_token return None Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py Fri Sep 17 15:34:04 2010 @@ -163,7 +163,7 @@ argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping for i in range(op.numargs()): @@ -187,7 +187,7 @@ def find_nodes_NEW(self, op): instnode = InstanceNode() - instnode.structdescr = op.descr + instnode.structdescr = op.getdescr() self.nodes[op.result] = instnode def find_nodes_NEW_ARRAY(self, op): @@ -197,7 +197,7 @@ return # var-sized arrays are not virtual arraynode = InstanceNode() arraynode.arraysize = lengthbox.getint() - arraynode.arraydescr = op.descr + arraynode.arraydescr = op.getdescr() self.nodes[op.result] = arraynode def find_nodes_ARRAYLEN_GC(self, op): @@ -226,7 +226,7 @@ if instnode.escaped: fieldnode.mark_escaped() return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is None: instnode.curfields = {} @@ -237,7 +237,7 @@ instnode = self.getnode(op.getarg(0)) if instnode.escaped: return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is not None and field in instnode.curfields: fieldnode = instnode.curfields[field] Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/heap.py Fri Sep 17 15:34:04 2010 @@ -117,7 +117,7 @@ if opnum == rop.CALL_ASSEMBLER: effectinfo = None else: - effectinfo = op.descr.get_extra_info() + effectinfo = op.getdescr().get_extra_info() if effectinfo is not None: # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -203,20 +203,20 @@ def force_lazy_setfield_if_necessary(self, 
op, value, write=False): try: - op1 = self.lazy_setfields[op.descr] + op1 = self.lazy_setfields[op.getdescr()] except KeyError: if write: - self.lazy_setfields_descrs.append(op.descr) + self.lazy_setfields_descrs.append(op.getdescr()) else: if self.getvalue(op1.getarg(0)) is not value: - self.force_lazy_setfield(op.descr) + self.force_lazy_setfield(op.getdescr()) def optimize_GETFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) self.force_lazy_setfield_if_necessary(op, value) # check if the field was read from another getfield_gc just before # or has been written to recently - fieldvalue = self.read_cached_field(op.descr, value) + fieldvalue = self.read_cached_field(op.getdescr(), value) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return @@ -226,34 +226,34 @@ self.emit_operation(op) # FIXME: These might need constant propagation? # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.descr, value, fieldvalue) + self.cache_field_value(op.getdescr(), value, fieldvalue) def optimize_SETFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) fieldvalue = self.getvalue(op.getarg(1)) self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.descr] = op + self.lazy_setfields[op.getdescr()] = op # remember the result of future reads of the field - self.cache_field_value(op.descr, value, fieldvalue, write=True) + self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) indexvalue = self.getvalue(op.getarg(1)) - fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) + fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return ###self.optimizer.optimize_default(op) self.emit_operation(op) # FIXME: These might need constant propagation? 
fieldvalue = self.getvalue(op.result) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue) def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) value = self.getvalue(op.getarg(0)) fieldvalue = self.getvalue(op.getarg(2)) indexvalue = self.getvalue(op.getarg(1)) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) def propagate_forward(self, op): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 17 15:34:04 2010 @@ -340,7 +340,7 @@ def store_final_boxes_in_guard(self, op): ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) newboxes = modifier.finish(self.values, self.pendingfields) @@ -395,7 +395,7 @@ argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.make_constant(op.result, resbox.constbox()) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard @@ -404,7 +404,7 @@ # did we do the exact same operation already? args = self.make_args_key(op) oldop = self.pure_operations.get(args, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): assert oldop.getopnum() == op.getopnum() self.make_equal_to(op.result, self.getvalue(oldop.result)) if is_ovf: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Fri Sep 17 15:34:04 2010 @@ -24,7 +24,7 @@ def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): value = self.getvalue(oldop.result) if value.is_constant(): if value.box.same_constant(CONST_1): @@ -50,7 +50,7 @@ oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) return True except KeyError: @@ -139,7 +139,7 @@ # replace CALL_PURE with just CALL args = op.getarglist()[1:] self.emit_operation(ResOperation(rop.CALL, args, op.result, - op.descr)) + op.getdescr())) def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) if value.is_constant(): @@ -182,9 +182,9 @@ args = [old_guard_op.getarg(0), op.getarg(1)]) self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. 
Change the guard_opnum on - # new_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = new_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE descr.make_a_counter_per_value(new_guard_op) @@ -222,9 +222,9 @@ args = [old_guard_op.getarg(0), op.getarg(1)]) self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # new_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = new_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS emit_operation = False @@ -314,7 +314,7 @@ value = self.getvalue(op.getarg(0)) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) + checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr()) result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, realclassbox, checkclassbox) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 17 15:34:04 2010 @@ -285,7 +285,7 @@ def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] - target_loop_token = orgop.descr + target_loop_token = orgop.getdescr() assert isinstance(target_loop_token, LoopToken) specnodes = target_loop_token.specnodes assert op.numargs() == len(specnodes) @@ -344,7 +344,7 @@ if value.is_virtual(): # optimizefindnode should ensure that fieldvalue is found assert isinstance(value, AbstractVirtualValue) - fieldvalue = value.getfield(op.descr, None) + fieldvalue = value.getfield(op.getdescr(), None) assert fieldvalue is not None self.make_equal_to(op.result, fieldvalue) else: @@ -360,7 +360,7 @@ value = self.getvalue(op.getarg(0)) fieldvalue = self.getvalue(op.getarg(1)) if value.is_virtual(): - value.setfield(op.descr, fieldvalue) + value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) @@ -370,7 +370,7 @@ self.make_virtual(op.getarg(0), op.result, op) def optimize_NEW(self, op): - self.make_vstruct(op.descr, op.result, op) + self.make_vstruct(op.getdescr(), op.result, op) def optimize_NEW_ARRAY(self, op): sizebox = self.get_constant_box(op.getarg(0)) @@ -379,8 +379,8 @@ # build a new one with the ConstInt argument if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.descr) - self.make_varray(op.descr, sizebox.getint(), op.result, op) + descr=op.getdescr()) + self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: ###self.optimize_default(op) self.emit_operation(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Fri Sep 17 
15:34:04 2010 @@ -24,6 +24,11 @@ self.result = result self.setdescr(descr) + def __setattr__(self, name, attr): + if name == 'descr': + assert False + object.__setattr__(self, name, attr) + def copy_and_change(self, opnum, args=None, result=None, descr=None): "shallow copy: the returned operation is meant to be used in place of self" if args is None: @@ -62,7 +67,7 @@ self.fail_args = fail_args def getdescr(self): - return self.descr + return self._descr def setdescr(self, descr): # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt @@ -72,10 +77,10 @@ # cpu.calldescrof(), and cpu.typedescrof(). from pypy.jit.metainterp.history import check_descr check_descr(descr) - self.descr = descr + self._descr = descr def clone(self): - descr = self.descr + descr = self._descr if descr is not None: descr = descr.clone_if_mutable() op = ResOperation(self._opnum, self._args, self.result, descr) @@ -98,12 +103,12 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self.descr is None or we_are_translated(): + if self._descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), ', '.join([str(a) for a in self._args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self._args]), self.descr) + ', '.join([str(a) for a in self._args]), self._descr) def getopname(self): try: Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/simple_optimize.py Fri Sep 17 15:34:04 2010 @@ -18,7 +18,7 @@ op = ResOperation(rop.CALL, args, op.result, descr=descr) elif op.getopnum() == rop.CALL_PURE: args = op.getarglist()[1:] - op = ResOperation(rop.CALL, args, op.result, op.descr) + op = ResOperation(rop.CALL, args, op.result, op.getdescr()) elif op.getopnum() == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) elif op.getopnum() == rop.VIRTUAL_REF_FINISH: @@ -38,7 +38,7 @@ newoperations = [] for op in loop.operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, memo) newboxes = modifier.finish(EMPTY_VALUES) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py Fri Sep 17 15:34:04 2010 @@ -19,7 +19,7 @@ assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 - assert loop.operations[-1].descr + assert loop.operations[-1].getdescr() def test_const_ptr_subops(): x = """ @@ -30,7 +30,7 @@ vtable = lltype.nullptr(S) loop = parse(x, None, locals()) assert len(loop.operations) == 1 - assert loop.operations[0].descr + assert loop.operations[0].getdescr() assert loop.operations[0].fail_args == [] def test_descr(): @@ -43,7 +43,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_after_fail(): x = """ @@ -64,7 +64,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert 
loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_boxname(): x = """ @@ -119,7 +119,7 @@ jump() ''' loop = parse(x) - assert loop.operations[0].descr is loop.token + assert loop.operations[0].getdescr() is loop.token def test_jump_target_other(): looptoken = LoopToken() @@ -128,7 +128,7 @@ jump(descr=looptoken) ''' loop = parse(x, namespace=locals()) - assert loop.operations[0].descr is looptoken + assert loop.operations[0].getdescr() is looptoken def test_floats(): x = ''' Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 17 15:34:04 2010 @@ -151,7 +151,7 @@ else: remap[op2.result] = op1.result if op1.getopnum() != rop.JUMP: # xxx obscure - assert op1.descr == op2.descr + assert op1.getdescr() == op2.getdescr() if op1.fail_args or op2.fail_args: assert len(op1.fail_args) == len(op2.fail_args) if strict_fail_args: @@ -2327,7 +2327,7 @@ from pypy.jit.metainterp.test.test_resume import MyMetaInterp guard_op, = [op for op in self.loop.operations if op.is_guard()] fail_args = guard_op.fail_args - fdescr = guard_op.descr + fdescr = guard_op.getdescr() assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_recursive.py Fri Sep 17 15:34:04 2010 @@ -319,8 +319,8 @@ for loop in get_stats().loops: assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode for op in loop.operations: - if op.is_guard() and hasattr(op.descr, '_debug_suboperations'): - assert len(op.descr._debug_suboperations) <= length + 5 + if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'): + assert len(op.getdescr()._debug_suboperations) <= length + 5 def test_inline_trace_limit(self): myjitdriver = JitDriver(greens=[], reds=['n']) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py Fri Sep 17 15:34:04 2010 @@ -88,7 +88,7 @@ cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint() cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base() cpu.clear_latest_values = lambda count: None - resumereader = ResumeDataDirectReader(cpu, guard_op.descr) + resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr()) vrefinfo = self.metainterp.staticdata.virtualref_info lst = [] vrefinfo.continue_tracing = lambda vref, virtual: \ @@ -100,7 +100,7 @@ lst[0][0]) # assert correct type # # try reloading from pyjitpl's point of view - self.metainterp.rebuild_state_after_failure(guard_op.descr) + self.metainterp.rebuild_state_after_failure(guard_op.getdescr()) assert len(self.metainterp.framestack) == 1 assert len(self.metainterp.virtualref_boxes) 
== 2 assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value From fijal at codespeak.net Fri Sep 17 15:36:52 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Fri, 17 Sep 2010 15:36:52 +0200 (CEST) Subject: [pypy-svn] r77146 - pypy/branch/rsocket-improvements Message-ID: <20100917133652.DF210282B9E@codespeak.net> Author: fijal Date: Fri Sep 17 15:36:51 2010 New Revision: 77146 Removed: pypy/branch/rsocket-improvements/ Log: Remove merged branch From arigo at codespeak.net Fri Sep 17 15:42:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 15:42:00 +0200 (CEST) Subject: [pypy-svn] r77147 - pypy/branch/better-map-instances Message-ID: <20100917134200.9D4A1282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 15:41:59 2010 New Revision: 77147 Removed: pypy/branch/better-map-instances/ Log: Remove the branch, will be replaced by a merge of trunk and it. From arigo at codespeak.net Fri Sep 17 15:43:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 15:43:15 +0200 (CEST) Subject: [pypy-svn] r77148 - in pypy/branch/better-map-instances: . pypy/config pypy/doc/config pypy/interpreter pypy/module/__builtin__ pypy/module/__builtin__/test pypy/module/_weakref pypy/module/cpyext pypy/module/pypyjit/test pypy/objspace/std pypy/objspace/std/test pypy/rlib pypy/rlib/test Message-ID: <20100917134315.75D71282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 15:43:13 2010 New Revision: 77148 Added: pypy/branch/better-map-instances/ - copied from r77144, pypy/trunk/ pypy/branch/better-map-instances/pypy/doc/config/objspace.std.withmapdict.txt - copied unchanged from r77144, pypy/branch/better-map-instances/pypy/doc/config/objspace.std.withmapdict.txt pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py - copied unchanged from r77144, pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py - copied unchanged from r77144, pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py pypy/branch/better-map-instances/pypy/rlib/rerased.py - copied unchanged from r77144, pypy/branch/better-map-instances/pypy/rlib/rerased.py pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py - copied unchanged from r77144, pypy/branch/better-map-instances/pypy/rlib/test/test_rerased.py Modified: pypy/branch/better-map-instances/pypy/config/pypyoption.py pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py pypy/branch/better-map-instances/pypy/interpreter/pycode.py pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py pypy/branch/better-map-instances/pypy/interpreter/typedef.py pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py pypy/branch/better-map-instances/pypy/module/_weakref/interp__weakref.py pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py pypy/branch/better-map-instances/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/better-map-instances/pypy/objspace/std/celldict.py pypy/branch/better-map-instances/pypy/objspace/std/dictmultiobject.py pypy/branch/better-map-instances/pypy/objspace/std/objspace.py pypy/branch/better-map-instances/pypy/objspace/std/sharingdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_dictmultiobject.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_shadowtracking.py 
pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py Log: Merge the previous branch/better-map-instance into the trunk, into branch/better-map-instance again. Modified: pypy/branch/better-map-instances/pypy/config/pypyoption.py ============================================================================== --- pypy/trunk/pypy/config/pypyoption.py (original) +++ pypy/branch/better-map-instances/pypy/config/pypyoption.py Fri Sep 17 15:43:13 2010 @@ -238,6 +238,16 @@ default=False, requires=[("objspace.std.withshadowtracking", False)]), + BoolOption("withmapdict", + "make instances really small but slow without the JIT", + default=False, + requires=[("objspace.std.withshadowtracking", False), + ("objspace.std.withinlineddict", False), + ("objspace.std.withsharingdict", False), + ("objspace.std.getattributeshortcut", True), + ("objspace.std.withtypeversion", True), + ]), + BoolOption("withrangelist", "enable special range list implementation that does not " "actually create the full list until the resulting " @@ -319,6 +329,9 @@ # all the good optimizations for PyPy should be listed here if level in ['2', '3']: config.objspace.opcodes.suggest(CALL_LIKELY_BUILTIN=True) + config.objspace.std.suggest(withinlineddict=True) + if type_system != 'ootype': + config.objspace.std.suggest(withsharingdict=True) if level in ['2', '3', 'jit']: config.objspace.opcodes.suggest(CALL_METHOD=True) config.objspace.std.suggest(withrangelist=True) @@ -328,9 +341,6 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if type_system != 'ootype': - config.objspace.std.suggest(withsharingdict=True) - config.objspace.std.suggest(withinlineddict=True) # extra costly optimizations only go in level 3 if level == '3': @@ -343,7 +353,7 @@ config.objspace.std.suggest(withprebuiltint=True) config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withinlineddict=True) + config.objspace.std.suggest(withmapdict=True) config.objspace.std.suggest(withstrslice=True) config.objspace.std.suggest(withstrjoin=True) # xxx other options? ropes maybe? 
@@ -359,6 +369,7 @@ # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) + config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): Modified: pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/trunk/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py Fri Sep 17 15:43:13 2010 @@ -166,6 +166,20 @@ def _call_builtin_destructor(self): pass # method overridden in typedef.py + # hooks that the mapdict implementations needs: + def _get_mapdict_map(self): + return None + def _set_mapdict_map(self, map): + raise NotImplementedError + def _mapdict_read_storage(self, index): + raise NotImplementedError + def _mapdict_write_storage(self, index, value): + raise NotImplementedError + def _mapdict_storage_length(self): + raise NotImplementedError + def _set_mapdict_storage(self, storage): + raise NotImplementedError + class Wrappable(W_Root): """A subclass of Wrappable is an internal, interpreter-level class Modified: pypy/branch/better-map-instances/pypy/interpreter/pycode.py ============================================================================== --- pypy/trunk/pypy/interpreter/pycode.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/pycode.py Fri Sep 17 15:43:13 2010 @@ -117,6 +117,10 @@ self._compute_flatcall() + if self.space.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import init_mapdict_cache + init_mapdict_cache(self) + def _freeze_(self): if (self.magic == cpython_magic and '__pypy__' not in sys.builtin_module_names): Modified: pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py ============================================================================== --- pypy/trunk/pypy/interpreter/pyopcode.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/pyopcode.py Fri Sep 17 15:43:13 2010 @@ -710,9 +710,14 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" - w_attributename = self.getname_w(nameindex) w_obj = self.popvalue() - w_value = self.space.getattr(w_obj, w_attributename) + if (self.space.config.objspace.std.withmapdict + and not jit.we_are_jitted()): + from pypy.objspace.std.mapdict import LOAD_ATTR_caching + w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) + else: + w_attributename = self.getname_w(nameindex) + w_value = self.space.getattr(w_obj, w_attributename) self.pushvalue(w_value) LOAD_ATTR._always_inline_ = True Modified: pypy/branch/better-map-instances/pypy/interpreter/typedef.py ============================================================================== --- pypy/trunk/pypy/interpreter/typedef.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/typedef.py Fri Sep 17 15:43:13 2010 @@ -133,6 +133,13 @@ typedef = cls.typedef if wants_dict and typedef.hasdict: wants_dict = False + if config.objspace.std.withmapdict and not typedef.hasdict: + # mapdict only works if the type does not already have a dict + if wants_del: + parentcls = get_unique_interplevel_subclass(config, cls, True, True, + False, True) + return _usersubclswithfeature(config, parentcls, "del") + return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") # Forest of if's - see the comment above. 
if wants_del: if wants_dict: @@ -186,10 +193,20 @@ def add(Proto): for key, value in Proto.__dict__.items(): - if not key.startswith('__') or key == '__del__': + if (not key.startswith('__') and not key.startswith('_mixin_') + or key == '__del__'): + if hasattr(value, "func_name"): + value = func_with_new_name(value, value.func_name) body[key] = value + if (config.objspace.std.withmapdict and "dict" in features): + from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin + add(BaseMapdictObject) + add(ObjectMixin) + features = () + if "user" in features: # generic feature needed by all subcls + class Proto(object): user_overridden_class = True @@ -255,6 +272,9 @@ wantdict = False if wantdict: + base_user_setup = supercls.user_setup.im_func + if "user_setup" in body: + base_user_setup = body["user_setup"] class Proto(object): def getdict(self): return self.w__dict__ @@ -263,11 +283,9 @@ self.w__dict__ = check_new_dictionary(space, w_dict) def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype self.w__dict__ = space.newdict( instance=True, classofinstance=w_subtype) - self.user_setup_slots(w_subtype.nslots) + base_user_setup(self, space, w_subtype) def setclass(self, space, w_subtype): # only used by descr_set___class__ Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/interp_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/interp_classobj.py Fri Sep 17 15:43:13 2010 @@ -2,9 +2,11 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped, applevel from pypy.interpreter.gateway import interp2app, ObjSpace -from pypy.interpreter.typedef import TypeDef, make_weakref_descr +from pypy.interpreter.typedef import TypeDef from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import GetSetProperty, descr_get_dict +from pypy.interpreter.typedef import descr_set_dict from pypy.rlib.rarithmetic import r_uint, intmask from pypy.rlib.objectmodel import compute_identity_hash from pypy.rlib.debug import make_sure_not_resized @@ -57,6 +59,14 @@ self.bases_w = bases self.w_dict = w_dict + def instantiate(self, space): + cache = space.fromcache(Cache) + if self.lookup(space, '__del__') is not None: + w_inst = cache.cls_with_del(space, self) + else: + w_inst = cache.cls_without_del(space, self) + return w_inst + def getdict(self): return self.w_dict @@ -100,15 +110,15 @@ return False @jit.unroll_safe - def lookup(self, space, w_attr): + def lookup(self, space, attr): # returns w_value or interplevel None - w_result = space.finditem(self.w_dict, w_attr) + w_result = space.finditem_str(self.w_dict, attr) if w_result is not None: return w_result for base in self.bases_w: # XXX fix annotation of bases_w to be a list of W_ClassObjects assert isinstance(base, W_ClassObject) - w_result = base.lookup(space, w_attr) + w_result = base.lookup(space, attr) if w_result is not None: return w_result return None @@ -122,7 +132,7 @@ return space.wrap(self.name) elif name == "__bases__": return space.newtuple(self.bases_w) - w_value = self.lookup(space, w_attr) + w_value = self.lookup(space, name) if w_value is None: raise operationerrfmt( space.w_AttributeError, @@ -147,7 +157,7 @@ self.setbases(space, w_value) return elif name == 
"__del__": - if self.lookup(space, w_attr) is None: + if self.lookup(space, name) is None: msg = ("a __del__ method added to an existing class " "will not be called") space.warn(msg, space.w_RuntimeWarning) @@ -195,13 +205,20 @@ # NOT_RPYTHON return '' % self.name +class Cache: + def __init__(self, space): + from pypy.interpreter.typedef import _usersubclswithfeature + # evil + self.cls_without_del = _usersubclswithfeature( + space.config, W_InstanceObject, "dict", "weakref") + self.cls_with_del = _usersubclswithfeature( + space.config, self.cls_without_del, "del") + + def class_descr_call(space, w_self, __args__): self = space.interp_w(W_ClassObject, w_self) - if self.lookup(space, space.wrap('__del__')) is not None: - w_inst = W_InstanceObjectWithDel(space, self) - else: - w_inst = W_InstanceObject(space, self) - w_init = w_inst.getattr_from_class(space, space.wrap('__init__')) + w_inst = self.instantiate(space) + w_init = w_inst.getattr_from_class(space, '__init__') if w_init is not None: w_result = space.call_args(w_init, __args__) if not space.is_w(w_result, space.w_None): @@ -234,7 +251,7 @@ def make_unary_instance_method(name): def unaryop(self, space): - w_meth = self.getattr(space, space.wrap(name), True) + w_meth = self.getattr(space, name, True) return space.call_function(w_meth) unaryop.func_name = name return unaryop @@ -242,7 +259,7 @@ def make_binary_returning_notimplemented_instance_method(name): def binaryop(self, space, w_other): try: - w_meth = self.getattr(space, space.wrap(name), False) + w_meth = self.getattr(space, name, False) except OperationError, e: if e.match(space, space.w_AttributeError): return space.w_NotImplemented @@ -267,7 +284,7 @@ w_a = self w_b = w_other if w_a is self: - w_meth = self.getattr(space, space.wrap(specialname), False) + w_meth = self.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) @@ -278,7 +295,7 @@ def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) if w_a is None or w_a is self: - w_meth = self.getattr(space, space.wrap(rspecialname), False) + w_meth = self.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_other) @@ -302,46 +319,34 @@ raise OperationError( space.w_TypeError, space.wrap("instance() first arg must be class")) - if space.is_w(w_dict, space.w_None): - w_dict = None - elif not space.is_true(space.isinstance(w_dict, space.w_dict)): - raise OperationError( - space.w_TypeError, - space.wrap("instance() second arg must be dictionary or None")) - return W_InstanceObject(space, w_class, w_dict) + w_result = w_class.instantiate(space) + if not space.is_w(w_dict, space.w_None): + w_result.setdict(space, w_dict) + return w_result class W_InstanceObject(Wrappable): - def __init__(self, space, w_class, w_dict=None): - if w_dict is None: - w_dict = space.newdict(instance=True) + def __init__(self, space, w_class): + # note that user_setup is overridden by the typedef.py machinery + self.user_setup(space, space.gettypeobject(self.typedef)) assert isinstance(w_class, W_ClassObject) self.w_class = w_class - self.w_dict = w_dict - self.space = space - - def getdict(self): - return self.w_dict - def setdict(self, space, w_dict): - if (w_dict is None or - not space.is_true(space.isinstance(w_dict, space.w_dict))): - raise OperationError( - space.w_TypeError, - space.wrap("__dict__ must be a dictionary object")) - self.w_dict = w_dict + def user_setup(self, space, 
w_subtype): + self.space = space - def setclass(self, space, w_class): + def set_oldstyle_class(self, space, w_class): if w_class is None or not isinstance(w_class, W_ClassObject): raise OperationError( space.w_TypeError, space.wrap("__class__ must be set to a class")) self.w_class = w_class - def getattr_from_class(self, space, w_name): + def getattr_from_class(self, space, name): # Look up w_name in the class dict, and call its __get__. # This method ignores the instance dict and the __getattr__. # Returns None if not found. - w_value = self.w_class.lookup(space, w_name) + assert isinstance(name, str) + w_value = self.w_class.lookup(space, name) if w_value is None: return None w_descr_get = space.lookup(w_value, '__get__') @@ -349,19 +354,20 @@ return w_value return space.call_function(w_descr_get, w_value, self, self.w_class) - def getattr(self, space, w_name, exc=True): + def getattr(self, space, name, exc=True): # Normal getattr rules: look up w_name in the instance dict, # in the class dict, and then via a call to __getatttr__. - w_result = space.finditem(self.w_dict, w_name) + assert isinstance(name, str) + w_result = self.getdictvalue(space, name) if w_result is not None: return w_result - w_result = self.getattr_from_class(space, w_name) + w_result = self.getattr_from_class(space, name) if w_result is not None: return w_result - w_meth = self.getattr_from_class(space, space.wrap('__getattr__')) + w_meth = self.getattr_from_class(space, '__getattr__') if w_meth is not None: try: - return space.call_function(w_meth, w_name) + return space.call_function(w_meth, space.wrap(name)) except OperationError, e: if not exc and e.match(space, space.w_AttributeError): return None # eat the AttributeError @@ -371,7 +377,7 @@ raise operationerrfmt( space.w_AttributeError, "%s instance has no attribute '%s'", - self.w_class.name, space.str_w(w_name)) + self.w_class.name, name) else: return None @@ -379,44 +385,46 @@ name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': if name == "__dict__": - return self.w_dict + return self.getdict() elif name == "__class__": return self.w_class - return self.getattr(space, w_attr) + return self.getattr(space, name) def descr_setattr(self, space, w_name, w_value): name = unwrap_attr(space, w_name) - w_meth = self.getattr_from_class(space, space.wrap('__setattr__')) + w_meth = self.getattr_from_class(space, '__setattr__') if name and name[0] == "_": if name == '__dict__': self.setdict(space, w_value) return if name == '__class__': - self.setclass(space, w_value) + self.set_oldstyle_class(space, w_value) return if name == '__del__' and w_meth is None: - if (not isinstance(self, W_InstanceObjectWithDel) - and space.finditem(self.w_dict, w_name) is None): + cache = space.fromcache(Cache) + if (not isinstance(self, cache.cls_with_del) + and self.getdictvalue(space, '__del__') is None): msg = ("a __del__ method added to an instance " "with no __del__ in the class will not be called") space.warn(msg, space.w_RuntimeWarning) if w_meth is not None: space.call_function(w_meth, w_name, w_value) else: - self.setdictvalue(space, name, w_value) + # bit obscure: appease normalization + self.setdictvalue(space, name, w_value, True) def descr_delattr(self, space, w_name): name = unwrap_attr(space, w_name) if name and name[0] == "_": if name == '__dict__': # use setdict to raise the error - self.setdict(space, None) + self.setdict(space, space.w_None) return elif name == '__class__': - # use setclass to raise the error - self.setclass(space, None) + # use 
set_oldstyle_class to raise the error + self.set_oldstyle_class(space, None) return - w_meth = self.getattr_from_class(space, space.wrap('__delattr__')) + w_meth = self.getattr_from_class(space, '__delattr__') if w_meth is not None: space.call_function(w_meth, w_name) else: @@ -427,7 +435,7 @@ self.w_class.name, name) def descr_repr(self, space): - w_meth = self.getattr(space, space.wrap('__repr__'), False) + w_meth = self.getattr(space, '__repr__', False) if w_meth is None: w_class = self.w_class mod = w_class.get_module_string(space) @@ -435,19 +443,19 @@ return space.call_function(w_meth) def descr_str(self, space): - w_meth = self.getattr(space, space.wrap('__str__'), False) + w_meth = self.getattr(space, '__str__', False) if w_meth is None: return self.descr_repr(space) return space.call_function(w_meth) def descr_unicode(self, space): - w_meth = self.getattr(space, space.wrap('__unicode__'), False) + w_meth = self.getattr(space, '__unicode__', False) if w_meth is None: return self.descr_str(space) return space.call_function(w_meth) def descr_len(self, space): - w_meth = self.getattr(space, space.wrap('__len__')) + w_meth = self.getattr(space, '__len__') w_result = space.call_function(w_meth) if space.is_true(space.isinstance(w_result, space.w_int)): if space.is_true(space.lt(w_result, space.wrap(0))): @@ -460,22 +468,22 @@ space.wrap("__len__() should return an int")) def descr_getitem(self, space, w_key): - w_meth = self.getattr(space, space.wrap('__getitem__')) + w_meth = self.getattr(space, '__getitem__') return space.call_function(w_meth, w_key) def descr_setitem(self, space, w_key, w_value): - w_meth = self.getattr(space, space.wrap('__setitem__')) + w_meth = self.getattr(space, '__setitem__') space.call_function(w_meth, w_key, w_value) def descr_delitem(self, space, w_key): - w_meth = self.getattr(space, space.wrap('__delitem__')) + w_meth = self.getattr(space, '__delitem__') space.call_function(w_meth, w_key) def descr_iter(self, space): - w_meth = self.getattr(space, space.wrap('__iter__'), False) + w_meth = self.getattr(space, '__iter__', False) if w_meth is not None: return space.call_function(w_meth) - w_meth = self.getattr(space, space.wrap('__getitem__'), False) + w_meth = self.getattr(space, '__getitem__', False) if w_meth is None: raise OperationError( space.w_TypeError, @@ -485,14 +493,14 @@ # don't see the point def descr_getslice(self, space, w_i, w_j): - w_meth = self.getattr(space, space.wrap('__getslice__'), False) + w_meth = self.getattr(space, '__getslice__', False) if w_meth is not None: return space.call_function(w_meth, w_i, w_j) else: return space.getitem(self, space.newslice(w_i, w_j, space.w_None)) def descr_setslice(self, space, w_i, w_j, w_sequence): - w_meth = self.getattr(space, space.wrap('__setslice__'), False) + w_meth = self.getattr(space, '__setslice__', False) if w_meth is not None: space.call_function(w_meth, w_i, w_j, w_sequence) else: @@ -500,20 +508,20 @@ w_sequence) def descr_delslice(self, space, w_i, w_j): - w_meth = self.getattr(space, space.wrap('__delslice__'), False) + w_meth = self.getattr(space, '__delslice__', False) if w_meth is not None: space.call_function(w_meth, w_i, w_j) else: return space.delitem(self, space.newslice(w_i, w_j, space.w_None)) def descr_call(self, space, __args__): - w_meth = self.getattr(space, space.wrap('__call__')) + w_meth = self.getattr(space, '__call__') return space.call_args(w_meth, __args__) def descr_nonzero(self, space): - w_func = self.getattr(space, space.wrap('__nonzero__'), False) + w_func = 
self.getattr(space, '__nonzero__', False) if w_func is None: - w_func = self.getattr(space, space.wrap('__len__'), False) + w_func = self.getattr(space, '__len__', False) if w_func is None: return space.w_True w_result = space.call_function(w_func) @@ -537,7 +545,7 @@ not isinstance(w_b, W_InstanceObject)): return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): - w_func = w_a.getattr(space, space.wrap('__cmp__'), False) + w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: w_res = space.call_function(w_func, w_b) if space.is_w(w_res, space.w_NotImplemented): @@ -556,7 +564,7 @@ return space.wrap(-1) return space.wrap(0) if isinstance(w_b, W_InstanceObject): - w_func = w_b.getattr(space, space.wrap('__cmp__'), False) + w_func = w_b.getattr(space, '__cmp__', False) if w_func is not None: w_res = space.call_function(w_func, w_a) if space.is_w(w_res, space.w_NotImplemented): @@ -577,10 +585,10 @@ return space.w_NotImplemented def descr_hash(self, space): - w_func = self.getattr(space, space.wrap('__hash__'), False) + w_func = self.getattr(space, '__hash__', False) if w_func is None: - w_eq = self.getattr(space, space.wrap('__eq__'), False) - w_cmp = self.getattr(space, space.wrap('__cmp__'), False) + w_eq = self.getattr(space, '__eq__', False) + w_cmp = self.getattr(space, '__cmp__', False) if w_eq is not None or w_cmp is not None: raise OperationError(space.w_TypeError, space.wrap("unhashable instance")) @@ -595,7 +603,7 @@ return w_ret def descr_index(self, space): - w_func = self.getattr(space, space.wrap('__index__'), False) + w_func = self.getattr(space, '__index__', False) if w_func is not None: return space.call_function(w_func) raise OperationError( @@ -603,7 +611,7 @@ space.wrap("object cannot be interpreted as an index")) def descr_contains(self, space, w_obj): - w_func = self.getattr(space, space.wrap('__contains__'), False) + w_func = self.getattr(space, '__contains__', False) if w_func is not None: return space.wrap(space.is_true(space.call_function(w_func, w_obj))) # now do it ourselves @@ -626,7 +634,7 @@ w_a = self w_b = w_other if w_a is self: - w_func = self.getattr(space, space.wrap('__pow__'), False) + w_func = self.getattr(space, '__pow__', False) if w_func is not None: return space.call_function(w_func, w_other) return space.w_NotImplemented @@ -634,7 +642,7 @@ return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case - w_func = self.getattr(space, space.wrap('__pow__'), False) + w_func = self.getattr(space, '__pow__', False) if w_func is not None: return space.call_function(w_func, w_other, w_modulo) return space.w_NotImplemented @@ -646,7 +654,7 @@ w_a = self w_b = w_other if w_a is self: - w_func = self.getattr(space, space.wrap('__rpow__'), False) + w_func = self.getattr(space, '__rpow__', False) if w_func is not None: return space.call_function(w_func, w_other) return space.w_NotImplemented @@ -654,13 +662,13 @@ return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case - w_func = self.getattr(space, space.wrap('__rpow__'), False) + w_func = self.getattr(space, '__rpow__', False) if w_func is not None: return space.call_function(w_func, w_other, w_modulo) return space.w_NotImplemented def descr_next(self, space): - w_func = self.getattr(space, space.wrap('next'), False) + w_func = self.getattr(space, 'next', False) if w_func is None: raise OperationError(space.w_TypeError, space.wrap("instance has no next() method")) @@ -669,10 +677,9 @@ def descr_del(self, space): # 
Note that this is called from executioncontext.UserDelAction # via the space.userdel() method. - w_name = space.wrap('__del__') - w_func = space.finditem(self.w_dict, w_name) + w_func = self.getdictvalue(space, '__del__') if w_func is None: - w_func = self.getattr_from_class(space, w_name) + w_func = self.getattr_from_class(space, '__del__') if w_func is not None: space.call_function(w_func) @@ -717,6 +724,14 @@ rmeth, unwrap_spec=["self", ObjSpace, W_Root]) + +def descr_del_dict(space, w_inst): + # use setdict to raise the error + w_inst.setdict(space, space.w_None) + +dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict) +dict_descr.name = '__dict__' + W_InstanceObject.typedef = TypeDef("instance", __new__ = interp2app(descr_instance_new), __getattribute__ = interp2app(W_InstanceObject.descr_getattribute, @@ -766,12 +781,9 @@ unwrap_spec=['self', ObjSpace, W_Root, W_Root]), next = interp2app(W_InstanceObject.descr_next, unwrap_spec=['self', ObjSpace]), - __weakref__ = make_weakref_descr(W_InstanceObject), __del__ = interp2app(W_InstanceObject.descr_del, unwrap_spec=['self', ObjSpace]), + __dict__ = dict_descr, **rawdict ) - -class W_InstanceObjectWithDel(W_InstanceObject): - def __del__(self): - self._enqueue_for_destruction(self.space) +W_InstanceObject.typedef.acceptable_as_base_class = False Modified: pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py ============================================================================== --- pypy/trunk/pypy/module/__builtin__/test/test_classobj.py (original) +++ pypy/branch/better-map-instances/pypy/module/__builtin__/test/test_classobj.py Fri Sep 17 15:43:13 2010 @@ -928,6 +928,31 @@ assert x is b assert y == 5 + def test_cant_subclass_instance(self): + class A: + pass + try: + class B(type(A())): + pass + except TypeError: + pass + else: + assert 0, "should have raised" + + def test_dict_descriptor(self): + import sys + if not hasattr(sys, 'pypy_objspaceclass'): + skip("on CPython old-style instances don't have a __dict__ descriptor") + class A: + pass + a = A() + a.x = 1 + descr = type(a).__dict__['__dict__'] + assert descr.__get__(a) == {'x': 1} + descr.__set__(a, {'x': 2}) + assert a.x == 2 + raises(TypeError, descr.__delete__, a) + class AppTestOldStyleSharing(AppTestOldstyle): def setup_class(cls): @@ -966,3 +991,22 @@ a = 1 b = 2 assert self.is_strdict(A) + +class AppTestOldStyleMapDict(AppTestOldstyle): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withmapdict": True}) + if option.runappdirect: + py.test.skip("can only be run on py.py") + def has_mapdict(space, w_inst): + return space.wrap(w_inst._get_mapdict_map() is not None) + cls.w_has_mapdict = cls.space.wrap(gateway.interp2app(has_mapdict)) + + + def test_has_mapdict(self): + class A: + def __init__(self): + self.x = 42 + a = A() + assert a.x == 42 + assert self.has_mapdict(a) + Modified: pypy/branch/better-map-instances/pypy/module/_weakref/interp__weakref.py ============================================================================== --- pypy/trunk/pypy/module/_weakref/interp__weakref.py (original) +++ pypy/branch/better-map-instances/pypy/module/_weakref/interp__weakref.py Fri Sep 17 15:43:13 2010 @@ -7,7 +7,7 @@ import weakref -class WeakrefLifeline(object): +class WeakrefLifeline(W_Root): def __init__(self, space): self.space = space # this is here for W_Root.clear_all_weakrefs() self.refs_weak = [] Modified: pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py 
============================================================================== --- pypy/trunk/pypy/module/cpyext/classobject.py (original) +++ pypy/branch/better-map-instances/pypy/module/cpyext/classobject.py Fri Sep 17 15:43:13 2010 @@ -15,16 +15,20 @@ class is the class of new object. The dict parameter will be used as the object's __dict__; if NULL, a new dictionary will be created for the instance.""" - if not PyClass_Check(space, w_class): + if not isinstance(w_class, W_ClassObject): return PyErr_BadInternalCall(space) - return W_InstanceObject(space, w_class, w_dict) + w_result = w_class.instantiate(space) + if w_dict is not None: + w_result.setdict(space, w_dict) + return w_result @cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) def _PyInstance_Lookup(space, w_instance, w_name): + name = space.str_w(w_name) assert isinstance(w_instance, W_InstanceObject) - w_result = space.finditem(w_instance.w_dict, w_name) + w_result = w_instance.getdictvalue(space, name) if w_result is not None: return w_result - return w_instance.w_class.lookup(space, w_name) + return w_instance.w_class.lookup(space, name) Modified: pypy/branch/better-map-instances/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/better-map-instances/pypy/module/pypyjit/test/test_pypy_c.py Fri Sep 17 15:43:13 2010 @@ -272,7 +272,7 @@ assert len(ops) == 2 assert not ops[0].get_opnames("call") assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 7 + assert len(ops[0].get_opnames("guard")) <= 2 assert not ops[1] # second LOOKUP_METHOD folded away ops = self.get_by_bytecode("CALL_METHOD") @@ -283,7 +283,7 @@ else: assert not bytecode.get_opnames("call") assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 9 + assert len(bytecode.get_opnames("guard")) <= 6 assert len(ops[1]) < len(ops[0]) ops = self.get_by_bytecode("LOAD_ATTR") @@ -317,8 +317,8 @@ assert len(ops) == 2 assert not ops[0].get_opnames("call") assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 7 - assert len(ops[0].get_opnames("getfield")) < 6 + assert len(ops[0].get_opnames("guard")) <= 2 + assert len(ops[0].get_opnames("getfield")) < 5 assert not ops[1] # second LOOKUP_METHOD folded away def test_default_and_kw(self): @@ -382,7 +382,7 @@ a.x = 2 i = i + a.x return i - ''', 67, + ''', 69, ([20], 20), ([31], 32)) @@ -390,7 +390,7 @@ self.get_by_bytecode("CALL_FUNCTION")) assert not callA.get_opnames("call") assert not callA.get_opnames("new") - assert len(callA.get_opnames("guard")) <= 8 + assert len(callA.get_opnames("guard")) <= 2 assert not callisinstance1.get_opnames("call") assert not callisinstance1.get_opnames("new") assert len(callisinstance1.get_opnames("guard")) <= 2 @@ -742,6 +742,8 @@ '''%(op1, float(a)/4.0, float(b)/4.0, op2), 109, ([], res)) def test_boolrewrite_ptr(self): + # XXX this test is way too imprecise in what it is actually testing + # it should count the number of guards instead compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -765,7 +767,7 @@ print print 'Test:', e1, e2, n, res self.run_source(''' - class tst: + class tst(object): pass def main(): a = tst() @@ -848,6 +850,8 @@ ''', 65, ([], 122880)) def test_array_intimg(self): + # XXX this test is way too imprecise in what it is actually testing + # it should count the 
number of guards instead for tc, maxops in zip('ilILd', (67, 67, 69, 69, 61)): res = 73574560 if tc in 'IL': Modified: pypy/branch/better-map-instances/pypy/objspace/std/celldict.py ============================================================================== --- pypy/trunk/pypy/objspace/std/celldict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/celldict.py Fri Sep 17 15:43:13 2010 @@ -45,7 +45,7 @@ if space.is_w(space.type(w_key), space.w_str): self.impl_setitem_str(self.space.str_w(w_key), w_value) else: - self._as_rdict().setitem(w_key, w_value) + self._as_rdict().impl_fallback_setitem(w_key, w_value) def impl_setitem_str(self, name, w_value, shadows_type=True): self.getcell(name, True).w_value = w_value @@ -66,7 +66,7 @@ elif _is_sane_hash(space, w_key_type): raise KeyError else: - self._as_rdict().delitem(w_key) + self._as_rdict().impl_fallback_delitem(w_key) def impl_length(self): # inefficient, but do we care? @@ -85,7 +85,7 @@ elif _is_sane_hash(space, w_lookup_type): return None else: - return self._as_rdict().getitem(w_lookup) + return self._as_rdict().impl_fallback_getitem(w_lookup) def impl_getitem_str(self, lookup): res = self.getcell(lookup, False) Modified: pypy/branch/better-map-instances/pypy/objspace/std/dictmultiobject.py ============================================================================== --- pypy/trunk/pypy/objspace/std/dictmultiobject.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/dictmultiobject.py Fri Sep 17 15:43:13 2010 @@ -102,17 +102,17 @@ else: return None - # _________________________________________________________________ + # _________________________________________________________________ # implementation methods def impl_getitem(self, w_key): #return w_value or None raise NotImplementedError("abstract base class") - def impl_getitem_str(self, w_key): + def impl_getitem_str(self, key): #return w_value or None raise NotImplementedError("abstract base class") - def impl_setitem_str(self, key, w_value, shadows_type=True): + def impl_setitem_str(self, key, w_value, shadows_type=True): raise NotImplementedError("abstract base class") def impl_setitem(self, w_key, w_value): @@ -120,7 +120,7 @@ def impl_delitem(self, w_key): raise NotImplementedError("abstract base class") - + def impl_length(self): raise NotImplementedError("abstract base class") @@ -310,7 +310,7 @@ if space.is_w(space.type(w_key), space.w_str): self.impl_setitem_str(self.space.str_w(w_key), w_value) else: - self._as_rdict().setitem(w_key, w_value) + self._as_rdict().impl_fallback_setitem(w_key, w_value) def impl_setitem_str(self, key, w_value, shadows_type=True): self.content[key] = w_value @@ -324,7 +324,7 @@ elif _is_sane_hash(space, w_key_type): raise KeyError else: - self._as_rdict().delitem(w_key) + self._as_rdict().impl_fallback_delitem(w_key) def impl_length(self): return len(self.content) @@ -344,7 +344,7 @@ elif _is_sane_hash(space, w_lookup_type): return None else: - return self._as_rdict().getitem(w_key) + return self._as_rdict().impl_fallback_getitem(w_key) def impl_iter(self): return StrIteratorImplementation(self.space, self) @@ -414,7 +414,7 @@ StrDictImplementation.impl_setitem_str( self, self.space.str_w(w_key), w_value, False) else: - self._as_rdict().setitem(w_key, w_value) + self._as_rdict().impl_fallback_setitem(w_key, w_value) def impl_shadows_anything(self): return (self._shadows_anything or @@ -446,7 +446,7 @@ elif _is_sane_hash(space, w_key_type): raise KeyError else: - self._as_rdict().delitem(w_key) + 
self._as_rdict().impl_fallback_delitem(w_key) def impl_get_builtin_indexed(self, i): return self.shadowed[i] Modified: pypy/branch/better-map-instances/pypy/objspace/std/objspace.py ============================================================================== --- pypy/trunk/pypy/objspace/std/objspace.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/objspace.py Fri Sep 17 15:43:13 2010 @@ -23,6 +23,7 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.noneobject import W_NoneObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.ropeobject import W_RopeObject from pypy.objspace.std.iterobject import W_SeqIterObject from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject @@ -318,9 +319,14 @@ w_subtype = w_type.check_user_subclass(w_subtype) if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) + if (self.config.objspace.std.withmapdict and cls is W_ObjectObject + and not w_subtype.needsdel): + from pypy.objspace.std.mapdict import get_subclass_of_correct_size + subcls = get_subclass_of_correct_size(self, cls, w_subtype) + else: + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, + w_subtype.needsdel, w_subtype.weakrefable) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) Modified: pypy/branch/better-map-instances/pypy/objspace/std/sharingdict.py ============================================================================== --- pypy/trunk/pypy/objspace/std/sharingdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/sharingdict.py Fri Sep 17 15:43:13 2010 @@ -71,7 +71,7 @@ elif _is_sane_hash(space, w_lookup_type): return None else: - return self._as_rdict().getitem(w_lookup) + return self._as_rdict().impl_fallback_getitem(w_lookup) def impl_getitem_str(self, lookup): i = self.structure.lookup_position(lookup) @@ -84,7 +84,7 @@ if space.is_w(space.type(w_key), space.w_str): self.impl_setitem_str(self.space.str_w(w_key), w_value) else: - self._as_rdict().setitem(w_key, w_value) + self._as_rdict().impl_fallback_setitem(w_key, w_value) @unroll_safe def impl_setitem_str(self, key, w_value, shadows_type=True): @@ -132,7 +132,7 @@ elif _is_sane_hash(space, w_key_type): raise KeyError else: - self._as_rdict().delitem(w_key) + self._as_rdict().impl_fallback_delitem(w_key) def impl_length(self): return self.structure.length Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_dictmultiobject.py ============================================================================== --- pypy/trunk/pypy/objspace/std/test/test_dictmultiobject.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_dictmultiobject.py Fri Sep 17 15:43:13 2010 @@ -602,6 +602,15 @@ classofinstance=classofinstance, from_strdict_shared=from_strdict_shared) + def finditem_str(self, w_dict, s): + return w_dict.getitem_str(s) # assume it's a multidict + + def setitem_str(self, w_dict, s, w_value): + return w_dict.setitem_str(s, w_value) # assume it's a multidict + + def delitem(self, w_dict, w_s): + return w_dict.delitem(w_s) # assume it's a multidict + def allocate_instance(self, cls, type): return object.__new__(cls) @@ -611,7 +620,7 @@ 
     w_StopIteration = StopIteration
     w_None = None
     StringObjectCls = FakeString
-    w_dict = None
+    w_dict = W_DictMultiObject
     iter = iter
     fixedview = list
     listview = list
@@ -687,6 +696,14 @@
         assert self.impl.length() == 0
         self.check_not_devolved()
 
+    def test_clear(self):
+        self.fill_impl()
+        assert self.impl.length() == 2
+        self.impl.clear()
+        assert self.impl.length() == 0
+        self.check_not_devolved()
+
+
     def test_keys(self):
         self.fill_impl()
         keys = self.impl.keys()

Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_shadowtracking.py
==============================================================================
--- pypy/trunk/pypy/objspace/std/test/test_shadowtracking.py (original)
+++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_shadowtracking.py Fri Sep 17 15:43:13 2010
@@ -3,7 +3,8 @@
 
 class TestShadowTracking(object):
     def setup_class(cls):
-        cls.space = gettestobjspace(**{"objspace.std.withshadowtracking": True})
+        cls.space = gettestobjspace(**{"objspace.std.withshadowtracking": True,
+                                       "objspace.std.withmapdict": False})
 
     def test_simple_shadowing(self):
         space = self.space

Modified: pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py
==============================================================================
--- pypy/trunk/pypy/objspace/std/typeobject.py (original)
+++ pypy/branch/better-map-instances/pypy/objspace/std/typeobject.py Fri Sep 17 15:43:13 2010
@@ -75,7 +75,9 @@
                           'weakrefable',
                           'hasdict',
                           'nslots',
-                          'instancetypedef']
+                          'instancetypedef',
+                          'terminator',
+                          ]
 
     # for config.objspace.std.getattributeshortcut
     # (False is a conservative default, fixed during real usage)
@@ -116,6 +118,12 @@
         # dict_w of any of the types in the mro changes, or if the mro
         # itself changes
         w_self._version_tag = VersionTag()
+        if space.config.objspace.std.withmapdict:
+            from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator
+            if w_self.hasdict:
+                w_self.terminator = DictTerminator(space, w_self)
+            else:
+                w_self.terminator = NoDictTerminator(space, w_self)
 
     def mutated(w_self):
         space = w_self.space

From fijal at codespeak.net Fri Sep 17 15:43:38 2010
From: fijal at codespeak.net (fijal at codespeak.net)
Date: Fri, 17 Sep 2010 15:43:38 +0200 (CEST)
Subject: [pypy-svn] r77149 - pypy/trunk/pypy/interpreter
Message-ID: <20100917134338.3497A282BD4@codespeak.net>

Author: fijal
Date: Fri Sep 17 15:43:36 2010
New Revision: 77149

Modified:
   pypy/trunk/pypy/interpreter/baseobjspace.py
Log:
Kill useless assert

Modified: pypy/trunk/pypy/interpreter/baseobjspace.py
==============================================================================
--- pypy/trunk/pypy/interpreter/baseobjspace.py (original)
+++ pypy/trunk/pypy/interpreter/baseobjspace.py Fri Sep 17 15:43:36 2010
@@ -71,7 +71,8 @@
                  space.wrap("__class__ assignment: only for heap types"))
 
     def user_setup(self, space, w_subtype):
-        assert False, "only for interp-level user subclasses from typedef.py"
+        raise NotImplementedError("only for interp-level user subclasses "
+                                  "from typedef.py")
 
     def getname(self, space, default):
         try:

From fijal at codespeak.net Fri Sep 17 15:46:13 2010
From: fijal at codespeak.net (fijal at codespeak.net)
Date: Fri, 17 Sep 2010 15:46:13 +0200 (CEST)
Subject: [pypy-svn] r77150 - in pypy/trunk/pypy/module/select: . test
Message-ID: <20100917134613.CFC48282B9E@codespeak.net>

Author: fijal
Date: Fri Sep 17 15:46:12 2010
New Revision: 77150

Modified:
   pypy/trunk/pypy/module/select/interp_select.py
   pypy/trunk/pypy/module/select/test/test_select.py
Log:
Merge 77128 from rsocket-improvements branch

Modified: pypy/trunk/pypy/module/select/interp_select.py
==============================================================================
--- pypy/trunk/pypy/module/select/interp_select.py (original)
+++ pypy/trunk/pypy/module/select/interp_select.py Fri Sep 17 15:46:12 2010
@@ -54,14 +54,11 @@
     if space.is_w(w_timeout, space.w_None):
         timeout = -1
     else:
-        # rationale for computing directly integer, instead
-        # of float + math.cell is that
-        # we have for free overflow check and noone really
-        # cares (since CPython does not try too hard to have
-        # a ceiling of value)
+        # we want to be compatible with cpython and also accept things
+        # that can be casted to integer (I think)
         try:
             # compute the integer
-            timeout = space.int_w(w_timeout)
+            timeout = space.int_w(space.int(w_timeout))
         except (OverflowError, ValueError):
             raise OperationError(space.w_ValueError,
                                  space.wrap("math range error"))

Modified: pypy/trunk/pypy/module/select/test/test_select.py
==============================================================================
--- pypy/trunk/pypy/module/select/test/test_select.py (original)
+++ pypy/trunk/pypy/module/select/test/test_select.py Fri Sep 17 15:46:12 2010
@@ -210,6 +210,14 @@
         assert len(res[2]) == 0
         assert res[0][0] == res[1][0]
 
+    def test_poll(self):
+        import select
+        class A(object):
+            def __int__(self):
+                return 3
+
+        select.poll().poll(A()) # assert did not crash
+
 class AppTestSelectWithPipes(_AppTestSelect):
     "Use a pipe to get pairs of file descriptors"
     def setup_class(cls):
@@ -275,4 +283,3 @@
         s1, addr2 = cls.sock.accept()
         return s1, s2
 
-

From fijal at codespeak.net Fri Sep 17 15:47:28 2010
From: fijal at codespeak.net (fijal at codespeak.net)
Date: Fri, 17 Sep 2010 15:47:28 +0200 (CEST)
Subject: [pypy-svn] r77151 - pypy/trunk/pypy/rlib
Message-ID: <20100917134728.4BA3C282B9E@codespeak.net>

Author: fijal
Date: Fri Sep 17 15:47:26 2010
New Revision: 77151

Modified:
   pypy/trunk/pypy/rlib/_rsocket_rffi.py
Log:
Merge 77127 from rsocket-improvements branch

Modified: pypy/trunk/pypy/rlib/_rsocket_rffi.py
==============================================================================
--- pypy/trunk/pypy/rlib/_rsocket_rffi.py (original)
+++ pypy/trunk/pypy/rlib/_rsocket_rffi.py Fri Sep 17 15:47:26 2010
@@ -32,11 +32,13 @@
                 'arpa/inet.h',
                 'stdint.h',
                 'errno.h',
-                'netpacket/packet.h',
-                'sys/ioctl.h',
-                'net/if.h',
                 )
-    cond_includes = [('AF_NETLINK', 'linux/netlink.h')]
+
+    cond_includes = [('AF_NETLINK', 'linux/netlink.h'),
+                     ('AF_PACKET', 'netpacket/packet.h'),
+                     ('AF_PACKET', 'sys/ioctl.h'),
+                     ('AF_PACKET', 'net/if.h')]
+
     libraries = ()
     calling_conv = 'c'
     HEADER = ''.join(['#include <%s>\n' % filename for filename in includes])

From fijal at codespeak.net Fri Sep 17 15:48:21 2010
From: fijal at codespeak.net (fijal at codespeak.net)
Date: Fri, 17 Sep 2010 15:48:21 +0200 (CEST)
Subject: [pypy-svn] r77152 - pypy/trunk/pypy/rpython/numpy
Message-ID: <20100917134821.A962F282B9E@codespeak.net>

Author: fijal
Date: Fri Sep 17 15:48:20 2010
New Revision: 77152

Removed:
   pypy/trunk/pypy/rpython/numpy/
Log:
Remove numpy, it's unused for ages

From agaynor at codespeak.net Fri Sep 17 16:24:08 2010
From: agaynor at codespeak.net (agaynor at codespeak.net)
Date: Fri, 17 Sep 2010 16:24:08 +0200 (CEST)
Subject: [pypy-svn]
r77154 - in pypy/trunk/pypy/jit/metainterp: optimizeopt test Message-ID: <20100917142408.002B8282B9E@codespeak.net> Author: agaynor Date: Fri Sep 17 16:24:07 2010 New Revision: 77154 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Log: Propogate the fact that arraylen_gc can never return anything less than 0. Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Fri Sep 17 16:24:07 2010 @@ -1,6 +1,7 @@ from optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.optimizeutil import _findall -from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ + IntLowerBound from pypy.jit.metainterp.history import Const, ConstInt from pypy.jit.metainterp.resoperation import rop, ResOperation @@ -25,7 +26,7 @@ b = v.intbound if b.has_lower and b.has_upper and b.lower == b.upper: v.make_constant(ConstInt(b.lower)) - + try: op = self.optimizer.producer[box] except KeyError: @@ -183,7 +184,12 @@ self.make_constant_int(op.result, 1) else: self.emit_operation(op) - + + def optimize_ARRAYLEN_GC(self, op): + self.emit_operation(op) + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(0)) + def make_int_lt(self, args): v1 = self.getvalue(args[0]) v2 = self.getvalue(args[1]) Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 17 16:24:07 2010 @@ -33,7 +33,7 @@ self.profiler = EmptyProfiler() self.options = Fake() self.globaldata = Fake() - + def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -75,7 +75,7 @@ assert lst3 == [LLtypeMixin.valuedescr] lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 @@ -489,7 +489,7 @@ jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected) - + def test_ooisnull_oononnull_1(self): ops = """ [p0] @@ -842,7 +842,7 @@ jump(f, f1) """ self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected, checkspecnodes=False) + expected, checkspecnodes=False) def test_virtual_2(self): ops = """ @@ -2171,7 +2171,7 @@ jump(i1, i0) """ self.optimize_loop(ops, 'Not, Not', expected) - + def test_fold_partially_constant_ops(self): ops = """ [i0] @@ -2183,7 +2183,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(i0, 0) @@ -2194,7 +2194,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(0, i0) @@ -2205,7 +2205,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + # ---------- def make_fail_descr(self): @@ -3119,7 +3119,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noguard(self): ops = """ [i0] @@ -3134,7 +3134,7 @@ jump(i2) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noopt(self): ops = """ [i0] @@ -3153,7 +3153,7 @@ jump(4) """ self.optimize_loop(ops, 'Not', 
expected) - + def test_bound_lt_rev(self): ops = """ [i0] @@ -3170,7 +3170,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_tripple(self): ops = """ [i0] @@ -3189,7 +3189,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add(self): ops = """ [i0] @@ -3204,11 +3204,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_before(self): ops = """ [i0] @@ -3227,7 +3227,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf(self): ops = """ [i0] @@ -3243,11 +3243,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf_before(self): ops = """ [i0] @@ -3268,7 +3268,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub(self): ops = """ [i0] @@ -3283,11 +3283,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_sub(i0, 10) + i2 = int_sub(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub_before(self): ops = """ [i0] @@ -3306,7 +3306,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_ltle(self): ops = """ [i0] @@ -3357,7 +3357,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gtge(self): ops = """ [i0] @@ -3374,7 +3374,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gegt(self): ops = """ [i0] @@ -3414,6 +3414,26 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_bound_arraylen(self): + ops = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + i2 = int_gt(i1, -1) + guard_true(i2) [] + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + # The dead arraylen_gc will be eliminated by the backend. 
+ expected = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + def test_addsub_const(self): ops = """ [i0] @@ -3558,7 +3578,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ expected = """ @@ -3571,7 +3591,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ self.optimize_loop(ops, 'Not', expected) @@ -3818,7 +3838,7 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): @@ -3835,7 +3855,7 @@ ## jump(1) ## """ ## self.optimize_loop(ops, 'Not', expected) - + ## def test_instanceof_guard_class(self): ## ops = """ ## [i0, p0] From antocuni at codespeak.net Fri Sep 17 16:59:05 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 16:59:05 +0200 (CEST) Subject: [pypy-svn] r77155 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/cli backend/llgraph backend/test backend/x86 backend/x86/test metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100917145905.31080282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 16:59:02 2010 New Revision: 77155 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_ll_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_runner.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_basic.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_loop.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py Log: (david, antocuni): remove the fail_args field, and use the official API everywhere Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/cli/method.py Fri Sep 17 16:59:02 2010 @@ -357,10 +357,10 @@ assert op.is_guard() if op in self.cliloop.guard2ops: inputargs, suboperations = self.cliloop.guard2ops[op] - self.match_var_fox_boxes(op.fail_args, inputargs) + self.match_var_fox_boxes(op.getfailargs(), inputargs) self.emit_operations(suboperations) else: - 
self.emit_return_failed_op(op, op.fail_args) + self.emit_return_failed_op(op, op.getfailargs()) def emit_end(self): assert self.branches == [] Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py Fri Sep 17 16:59:02 2010 @@ -177,7 +177,7 @@ faildescr = op.getdescr() assert isinstance(faildescr, history.AbstractFailDescr) faildescr._fail_args_types = [] - for box in op.fail_args: + for box in op.getfailargs(): if box is None: type = history.HOLE else: @@ -186,7 +186,7 @@ fail_index = self.get_fail_descr_number(faildescr) index = llimpl.compile_add_fail(c, fail_index) faildescr._compiled_fail = c, index - for box in op.fail_args: + for box in op.getfailargs(): if box is not None: llimpl.compile_add_fail_arg(c, var2index[box]) else: Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py Fri Sep 17 16:59:02 2010 @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, results, None, descr=BasicFailDescr(0))] if operations[0].is_guard(): - operations[0].fail_args = [] + operations[0].setfailargs([]) if not descr: descr = BasicFailDescr(1) operations[0].setdescr(descr) @@ -117,7 +117,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -138,7 +138,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, None, i1, None] + operations[2].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -161,7 +161,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -185,7 +185,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -195,7 +195,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -219,7 +219,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -229,7 +229,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -252,7 +252,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + 
operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -318,7 +318,7 @@ descr=BasicFailDescr()), ResOperation(rop.JUMP, [z, t], None, descr=looptoken), ] - operations[-2].fail_args = [t, z] + operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 10) @@ -364,7 +364,7 @@ ResOperation(rop.FINISH, [v_res], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [] + ops[1].setfailargs([]) else: v_exc = self.cpu.ts.BoxRef() ops = [ @@ -373,7 +373,7 @@ descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [v_res] + ops[1].setfailargs([v_res]) # looptoken = LoopToken() self.cpu.compile_loop([v1, v2], ops, looptoken) @@ -910,7 +910,7 @@ ResOperation(rop.GUARD_TRUE, [i2], None), ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), ] - operations[2].fail_args = inputargs[:] + operations[2].setfailargs(inputargs[:]) operations[2].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) @@ -976,7 +976,7 @@ ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] - operations[-2].fail_args = fboxes + operations[-2].setfailargs(fboxes) looptoken = LoopToken() self.cpu.compile_loop(fboxes, operations, looptoken) @@ -1099,7 +1099,7 @@ descr=BasicFailDescr(4)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] - operations[1].fail_args = [] + operations[1].setfailargs([]) looptoken = LoopToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) @@ -1463,7 +1463,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i0] + ops[2].setfailargs([i1, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1507,7 +1507,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i2, i0] + ops[2].setfailargs([i1, i2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1552,7 +1552,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, f2, i0] + ops[2].setfailargs([i1, f2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_ll_random.py Fri Sep 17 16:59:02 2010 @@ -464,7 +464,7 @@ self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 5. 
Non raising-call and GUARD_EXCEPTION @@ -486,7 +486,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) op._exc_box = None builder.should_fail_by = op builder.guard_op = op @@ -507,7 +507,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 4. raising call and guard_no_exception @@ -524,7 +524,7 @@ op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) @@ -548,7 +548,7 @@ op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/test_random.py Fri Sep 17 16:59:02 2010 @@ -191,7 +191,7 @@ if self.should_fail_by is None: fail_args = self.loop.operations[-1].args else: - fail_args = self.should_fail_by.fail_args + fail_args = self.should_fail_by.getfailargs() for i, v in enumerate(fail_args): if isinstance(v, (BoxFloat, ConstFloat)): print >>s, (' assert cpu.get_latest_value_float(%d) == %r' @@ -285,7 +285,7 @@ else: op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) op.setdescr(BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation): @@ -346,7 +346,7 @@ op, passing = self.gen_guard(builder, r) builder.loop.operations.append(op) op.setdescr(BasicFailDescr()) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) if not passing: builder.should_fail_by = op builder.guard_op = op @@ -577,8 +577,8 @@ def get_fail_args(self): if self.should_fail_by.is_guard(): - assert self.should_fail_by.fail_args is not None - return self.should_fail_by.fail_args + assert self.should_fail_by.getfailargs() is not None + return self.should_fail_by.getfailargs() else: assert self.should_fail_by.getopnum() == rop.FINISH return self.should_fail_by.getarglist() @@ -634,25 +634,25 @@ op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box], BoxPtr()) op.setdescr(BasicFailDescr()) - op.fail_args = [] + op.setfailargs([]) return op if self.dont_generate_more: return False r = self.r guard_op = self.guard_op - fail_args = guard_op.fail_args + fail_args = guard_op.getfailargs() fail_descr = guard_op.getdescr() op = self.should_fail_by - if not op.fail_args: + if not op.getfailargs(): return False # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) if guard_op.is_guard_exception(): 
subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, - op.fail_args[:]) - self.generate_ops(bridge_builder, r, subloop, op.fail_args[:]) + op.getfailargs()[:]) + self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:]) # note that 'self.guard_op' now points to the guard that will fail in # this new bridge, while 'guard_op' still points to the guard that # has just failed. Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/assembler.py Fri Sep 17 16:59:02 2010 @@ -691,7 +691,7 @@ faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths - failargs = guard_op.fail_args + failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, faildescr, failargs, Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Fri Sep 17 16:59:02 2010 @@ -318,7 +318,7 @@ self.assembler.regalloc_perform(op, arglocs, result_loc) def locs_for_fail(self, guard_op): - return [self.loc(v) for v in guard_op.fail_args] + return [self.loc(v) for v in guard_op.getfailargs()] def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -330,7 +330,7 @@ current_depths) if op.result is not None: self.possibly_free_var(op.result) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def perform_guard(self, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -344,7 +344,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -369,7 +369,7 @@ if operations[i + 1].getarg(0) is not op.result: return False if (self.longevity[op.result][1] > i + 1 or - op.result in operations[i + 1].fail_args): + op.result in operations[i + 1].getfailargs()): return False return True @@ -416,7 +416,7 @@ raise AssertionError longevity[arg] = (start_live[arg], i) if op.is_guard(): - for arg in op.fail_args: + for arg in op.getfailargs(): if arg is None: # hole continue assert isinstance(arg, Box) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py Fri Sep 17 16:59:02 2010 @@ -159,7 +159,7 @@ assert guard_op.is_guard() bridge = self.parse(ops, **kwds) assert ([box.type for box in bridge.inputargs] == - [box.type for box in guard_op.fail_args]) + [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations) return bridge Modified: 
pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_runner.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_runner.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_runner.py Fri Sep 17 16:59:02 2010 @@ -265,7 +265,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) looptoken = LoopToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: @@ -314,7 +314,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = LoopToken() self.cpu.compile_loop(inputargs, ops, looptoken) @@ -353,7 +353,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[3].fail_args = [i1] + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 0: hello" @@ -368,7 +368,7 @@ ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) name, address, size = agent.functions[1] @@ -462,7 +462,7 @@ cmp_result = BoxInt() ops.append(ResOperation(float_op, args, cmp_result)) ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr())) - ops[-1].fail_args = [failed] + ops[-1].setfailargs([failed]) ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr())) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Fri Sep 17 16:59:02 2010 @@ -233,14 +233,14 @@ self.metainterp_sd = metainterp_sd def store_final_boxes(self, guard_op, boxes): - guard_op.fail_args = boxes + guard_op.setfailargs(boxes) self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE box = guard_value_op.getarg(0) try: - i = guard_value_op.fail_args.index(box) + i = guard_value_op.getfailargs().index(box) except ValueError: return # xxx probably very rare else: @@ -598,6 +598,6 @@ ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) ] - operations[1].fail_args = [] + operations[1].setfailargs([]) cpu.compile_loop(inputargs, operations, loop_token) return loop_token Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Fri Sep 17 16:59:02 2010 @@ -773,12 +773,12 @@ if hasattr(op.getdescr(), '_debug_suboperations'): ops = op.getdescr()._debug_suboperations TreeLoop.check_consistency_of_branch(ops, seen.copy()) - for box in op.fail_args or []: + for box in op.getfailargs() or []: if box is not None: assert isinstance(box, Box) assert box in seen else: - assert 
op.fail_args is None + assert op.getfailargs() is None box = op.result if box is not None: assert isinstance(box, Box) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py Fri Sep 17 16:59:02 2010 @@ -97,9 +97,9 @@ else: r = self.repr_of_descr(descr) args += ', descr=' + r - if is_guard and op.fail_args is not None: + if is_guard and op.getfailargs() is not None: fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg) - for arg in op.fail_args]) + ']' + for arg in op.getfailargs()]) + ']' else: fail_args = '' debug_print(res + op.getopname() + Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 17 16:59:02 2010 @@ -362,7 +362,7 @@ else: raise AssertionError("uh?") newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) - newop.fail_args = op.getfailargs() + newop.setfailargs(op.getfailargs()) return newop else: # a real GUARD_VALUE. Make it use one counter per value. Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Fri Sep 17 16:59:02 2010 @@ -7,12 +7,12 @@ class BaseResOperation(object): """The central ResOperation class, representing one operation.""" - # for 'guard_*' - fail_args = None + __slots__ = ['_fail_args', '_opnum', '_args', 'result', '_descr', + 'name', 'pc', '_exc_box', '__weakref__'] # debug - name = "" - pc = 0 + ## name = "" + ## pc = 0 def __init__(self, opnum, args, result, descr=None): make_sure_not_resized(args) @@ -23,11 +23,13 @@ assert not isinstance(result, list) self.result = result self.setdescr(descr) + + self._fail_args = None + self.pc = 0 + self.name = '' - def __setattr__(self, name, attr): - if name == 'descr': - assert False - object.__setattr__(self, name, attr) + ## def __init__(self, result): + ## self.result = result def copy_and_change(self, opnum, args=None, result=None, descr=None): "shallow copy: the returned operation is meant to be used in place of self" @@ -44,27 +46,35 @@ def getopnum(self): return self._opnum + #raise NotImplementedError def getarg(self, i): return self._args[i] + #raise NotImplementedError def setarg(self, i, box): self._args[i] = box + #raise NotImplementedError def numargs(self): return len(self._args) + #raise NotImplementedError def setarglist(self, args): + # XXX: is it really needed? 
self._args = args + #raise NotImplementedError def getarglist(self): return self._args + #raise NotImplementedError def getfailargs(self): - return self.fail_args + return self._fail_args + #raise NotImplementedError def setfailargs(self, fail_args): - self.fail_args = fail_args + self._fail_args = fail_args def getdescr(self): return self._descr @@ -83,8 +93,8 @@ descr = self._descr if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self._opnum, self._args, self.result, descr) - op.fail_args = self.fail_args + op = ResOperation(self.getopnum(), self._args, self.result, descr) + op._fail_args = self._fail_args op.name = self.name if not we_are_translated(): op.pc = self.pc @@ -103,7 +113,7 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self._descr is None or we_are_translated(): + if self.getdescr() is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), ', '.join([str(a) for a in self._args])) else: @@ -112,50 +122,52 @@ def getopname(self): try: - return opname[self._opnum].lower() + return opname[self.getopnum()].lower() except KeyError: - return '<%d>' % self._opnum + return '<%d>' % self.getopnum() def is_guard(self): - return rop._GUARD_FIRST <= self._opnum <= rop._GUARD_LAST + return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self._opnum <= rop._GUARD_FOLDABLE_LAST + return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST def is_guard_exception(self): - return (self._opnum == rop.GUARD_EXCEPTION or - self._opnum == rop.GUARD_NO_EXCEPTION) + return (self.getopnum() == rop.GUARD_EXCEPTION or + self.getopnum() == rop.GUARD_NO_EXCEPTION) def is_guard_overflow(self): - return (self._opnum == rop.GUARD_OVERFLOW or - self._opnum == rop.GUARD_NO_OVERFLOW) + return (self.getopnum() == rop.GUARD_OVERFLOW or + self.getopnum() == rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self._opnum <= rop._ALWAYS_PURE_LAST + return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self._opnum <= rop._NOSIDEEFFECT_LAST + return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST def can_raise(self): - return rop._CANRAISE_FIRST <= self._opnum <= rop._CANRAISE_LAST + return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST def is_ovf(self): - return rop._OVF_FIRST <= self._opnum <= rop._OVF_LAST + return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST def is_comparison(self): return self.is_always_pure() and self.returns_bool_result() def is_final(self): - return rop._FINAL_FIRST <= self._opnum <= rop._FINAL_LAST + return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST def returns_bool_result(self): - opnum = self._opnum + opnum = self.getopnum() if we_are_translated(): assert opnum >= 0 elif opnum < 0: return False # for tests return opboolresult[opnum] + + # ____________________________________________________________ _oplist = [ Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py Fri Sep 17 16:59:02 2010 @@ -238,13 +238,13 @@ rvar = self.box_for_var(res) self.vars[res] = rvar res = ResOperation(opnum, 
args, rvar, descr) - res.fail_args = fail_args + res.setfailargs(fail_args) return res def parse_op_no_result(self, line): opnum, args, descr, fail_args = self.parse_op(line) res = ResOperation(opnum, args, None, descr) - res.fail_args = fail_args + res.setfailargs(fail_args) return res def parse_next_op(self, line): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_basic.py Fri Sep 17 16:59:02 2010 @@ -296,7 +296,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_loop.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_loop.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_loop.py Fri Sep 17 16:59:02 2010 @@ -178,7 +178,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 2 # x, y (in some order) assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], history.BoxInt) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_oparser.py Fri Sep 17 16:59:02 2010 @@ -31,7 +31,7 @@ loop = parse(x, None, locals()) assert len(loop.operations) == 1 assert loop.operations[0].getdescr() - assert loop.operations[0].fail_args == [] + assert loop.operations[0].getfailargs() == [] def test_descr(): class Xyz(AbstractDescr): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 17 16:59:02 2010 @@ -50,11 +50,11 @@ fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1]) # opt.store_final_boxes_in_guard(op) - if op.fail_args == [b0, b1]: + if op.getfailargs() == [b0, b1]: assert fdescr.rd_numb.nums == [tag(1, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)] else: - assert op.fail_args == [b1, b0] + assert op.getfailargs() == [b1, b0] assert fdescr.rd_numb.nums == [tag(0, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)] assert fdescr.rd_virtuals is None @@ -152,14 +152,14 @@ remap[op2.result] = op1.result if op1.getopnum() != rop.JUMP: # xxx obscure assert op1.getdescr() == op2.getdescr() - if op1.fail_args or op2.fail_args: - assert len(op1.fail_args) == len(op2.fail_args) + if op1.getfailargs() or op2.getfailargs(): + assert len(op1.getfailargs()) == len(op2.getfailargs()) if strict_fail_args: - for x, y in zip(op1.fail_args, op2.fail_args): + for x, y in zip(op1.getfailargs(), 
op2.getfailargs()): assert x == remap.get(y, y) else: - fail_args1 = set(op1.fail_args) - fail_args2 = set([remap.get(y, y) for y in op2.fail_args]) + fail_args1 = set(op1.getfailargs()) + fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()]) assert fail_args1 == fail_args2 assert len(oplist1) == len(oplist2) print '-'*57 @@ -211,7 +211,7 @@ self.metainterp_sd = metainterp_sd self.original_greenkey = original_greenkey def store_final_boxes(self, op, boxes): - op.fail_args = boxes + op.setfailargs(boxes) def __eq__(self, other): return type(self) is type(other) # xxx obscure @@ -2326,7 +2326,7 @@ from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader from pypy.jit.metainterp.test.test_resume import MyMetaInterp guard_op, = [op for op in self.loop.operations if op.is_guard()] - fail_args = guard_op.fail_args + fail_args = guard_op.getfailargs() fdescr = guard_op.getdescr() assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_virtualref.py Fri Sep 17 16:59:02 2010 @@ -72,10 +72,10 @@ ops = self.metainterp.staticdata.stats.loops[0].operations [guard_op] = [op for op in ops if op.getopnum() == rop.GUARD_NOT_FORCED] - bxs1 = [box for box in guard_op.fail_args + bxs1 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('.X')] assert len(bxs1) == 1 - bxs2 = [box for box in guard_op.fail_args + bxs2 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('JitVirtualRef')] assert len(bxs2) == 1 JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF @@ -84,9 +84,9 @@ # try reloading from blackhole.py's point of view from pypy.jit.metainterp.resume import ResumeDataDirectReader cpu = self.metainterp.cpu - cpu.get_latest_value_count = lambda : len(guard_op.fail_args) - cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint() - cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base() + cpu.get_latest_value_count = lambda : len(guard_op.getfailargs()) + cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint() + cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base() cpu.clear_latest_values = lambda count: None resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr()) vrefinfo = self.metainterp.staticdata.virtualref_info From agaynor at codespeak.net Fri Sep 17 17:00:29 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Fri, 17 Sep 2010 17:00:29 +0200 (CEST) Subject: [pypy-svn] r77156 - pypy/trunk/pypy/rlib/test Message-ID: <20100917150029.783EA282B9E@codespeak.net> Author: agaynor Date: Fri Sep 17 17:00:27 2010 New Revision: 77156 Modified: pypy/trunk/pypy/rlib/test/test_rgc.py Log: Fix 2 GC test failures. 
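The two failures came from test_rgc.py still calling the old underscore-prefixed helpers; the hunks below switch the test to the public entry points rgc.get_rpy_referents() and rgc.get_rpy_memory_usage() (plus some trailing-whitespace cleanup). A minimal sketch of the updated usage, mirroring the test and assuming the usual direct, untranslated run of pypy/rlib/test/test_rgc.py:

    from pypy.rlib import rgc

    class X(object):
        __slots__ = ['stuff']

    x1 = X()
    x1.stuff = X()
    gcref = rgc.cast_instance_to_gcref(x1)
    # follow the GC edges of x1; the results are GcRefs as well,
    # so convert them back before checking that x1.stuff is among them
    lst = rgc.get_rpy_referents(gcref)
    lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst]
    assert x1.stuff in lst2
    # rough size of the object itself, in bytes
    nbytes = rgc.get_rpy_memory_usage(gcref)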
Modified: pypy/trunk/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/trunk/pypy/rlib/test/test_rgc.py (original) +++ pypy/trunk/pypy/rlib/test/test_rgc.py Fri Sep 17 17:00:27 2010 @@ -16,7 +16,7 @@ assert len(op.args) == 0 res = interpret(f, []) - + assert res is None def test_collect_0(): @@ -31,13 +31,13 @@ assert len(ops) == 1 op = ops[0][1] assert op.opname == 'gc__collect' - assert len(op.args) == 1 + assert len(op.args) == 1 assert op.args[0].value == 0 res = interpret(f, []) - - assert res is None - + + assert res is None + def test_can_move(): T0 = lltype.GcStruct('T') T1 = lltype.GcArray(lltype.Float) @@ -53,9 +53,9 @@ assert len(res) == 2 res = interpret(f, [1]) - + assert res == True - + def test_ll_arraycopy_1(): TYPE = lltype.GcArray(lltype.Signed) a1 = lltype.malloc(TYPE, 10) @@ -168,7 +168,7 @@ x1 = X() x1.stuff = X() x2 = X() - lst = rgc._get_referents(rgc.cast_instance_to_gcref(x1)) + lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1)) lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] assert x1.stuff in lst2 assert x2 not in lst2 @@ -177,5 +177,5 @@ class X(object): pass x1 = X() - n = rgc._get_memory_usage(rgc.cast_instance_to_gcref(x1)) + n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 From agaynor at codespeak.net Fri Sep 17 17:06:35 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Fri, 17 Sep 2010 17:06:35 +0200 (CEST) Subject: [pypy-svn] r77157 - pypy/trunk/pypy/rlib/test Message-ID: <20100917150635.72F87282B9E@codespeak.net> Author: agaynor Date: Fri Sep 17 17:06:34 2010 New Revision: 77157 Modified: pypy/trunk/pypy/rlib/test/test_rgc.py Log: Kill this test, it can't be run on CPython. Modified: pypy/trunk/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/trunk/pypy/rlib/test/test_rgc.py (original) +++ pypy/trunk/pypy/rlib/test/test_rgc.py Fri Sep 17 17:06:34 2010 @@ -154,14 +154,6 @@ for i in range(3): assert s2.vars[i] == 50 + i - -def test_get_objects(): - class X(object): - pass - x1 = X() - lst = rgc._get_objects() - assert rgc.cast_instance_to_gcref(x1) in lst - def test_get_referents(): class X(object): __slots__ = ['stuff'] From arigo at codespeak.net Fri Sep 17 17:45:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 17 Sep 2010 17:45:00 +0200 (CEST) Subject: [pypy-svn] r77158 - pypy/build/bot2/pypybuildbot Message-ID: <20100917154500.B82EF282B9E@codespeak.net> Author: arigo Date: Fri Sep 17 17:44:59 2010 New Revision: 77158 Modified: pypy/build/bot2/pypybuildbot/master.py Log: Start the Mac OS/X tests earlier during the night, so that they are finished at around 7am. Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Fri Sep 17 17:44:59 2010 @@ -183,6 +183,7 @@ 'schedulers': [ Nightly("nightly-0-45", [ JITBENCH, # on tannit -- nothing else there during first round! 
+ MACOSX32, # on minime ], hour=0, minute=45), Nightly("nightly-4-00", [ # rule: what we pick here on tannit should take at most 8 cores @@ -191,7 +192,6 @@ JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on tannit64, uses 1 core OJITLINUX32, # on tannit32, uses 1 core - MACOSX32, # on minime APPLVLWIN32, # on bigboard STACKLESSAPPLVLFREEBSD64, # on headless ], hour=4, minute=0), From antocuni at codespeak.net Fri Sep 17 17:49:53 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 17 Sep 2010 17:49:53 +0200 (CEST) Subject: [pypy-svn] r77159 - in pypy/branch/jitffi/pypy: jit/codewriter rlib rlib/test Message-ID: <20100917154953.57FC9282B9E@codespeak.net> Author: antocuni Date: Fri Sep 17 17:49:51 2010 New Revision: 77159 Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py pypy/branch/jitffi/pypy/rlib/libffi.py pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Log: add an oopspec to FuncPtr.{push_arg,call}, so that the jit can recognize it Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jitffi/pypy/jit/codewriter/support.py Fri Sep 17 17:49:51 2010 @@ -8,6 +8,7 @@ from pypy.rpython.ootypesystem import rdict as oo_rdict from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.annlowlevel import cast_base_ptr_to_instance from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import split_block from pypy.objspace.flow.model import Constant @@ -217,6 +218,21 @@ else: return x + +# libffi support +# -------------- + +def _ll_2_libffi_push_arg(llfunc, value): + from pypy.rlib.libffi import FuncPtr + func = cast_base_ptr_to_instance(FuncPtr, llfunc) + return func.push_arg(value) + +def _ll_2_libffi_call(llfunc, RES_TP): + from pypy.rlib.libffi import FuncPtr + func = cast_base_ptr_to_instance(FuncPtr, llfunc) + return func.call(lltype.Float) # XXX: should be RES_TP, but it doesn't work + + # in the following calls to builtins, the JIT is allowed to look inside: inline_calls_to = [ ('int_floordiv_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Fri Sep 17 17:49:51 2010 @@ -511,6 +511,7 @@ self.ll_args[self.pushed_args]) self.pushed_args += 1 push_arg._annspecialcase_ = 'specialize:argtype(1)' + push_arg.oopspec = 'libffi_push_arg(self, value)' def _check_args(self): if self.pushed_args < self.argnum: @@ -533,6 +534,7 @@ check_fficall_result(ffires, self.flags) return res call._annspecialcase_ = 'specialize:arg(1)' + call.oopspec = 'libffi_call(self, RES_TP)' def __del__(self): if self.ll_args: Modified: pypy/branch/jitffi/pypy/rlib/test/test_libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/test/test_libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Fri Sep 17 17:49:51 2010 @@ -18,6 +18,15 @@ ffistruct = globals()[name] rffi.cast(rffi.VOIDP, ffistruct) +def get_libm_name(platform): + if platform == 'win32': + return 'msvcrt.dll' + elif platform == "darwin": + return 'libm.dylib' + else: + return 'libm.so' + + class TestLibffi: def setup_method(self, meth): ALLOCATED.clear() @@ -26,12 
+35,7 @@ return CDLL(get_libc_name()) def get_libm(self): - if sys.platform == 'win32': - return CDLL('msvcrt.dll') - elif sys.platform == "darwin": - return CDLL('libm.dylib') - else: - return CDLL('libm.so') + return CDLL(get_libm_name(sys.platform)) def test_library_open(self): lib = self.get_libc() From cfbolz at codespeak.net Fri Sep 17 20:56:15 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 17 Sep 2010 20:56:15 +0200 (CEST) Subject: [pypy-svn] r77161 - in pypy/branch/better-map-instances/pypy/objspace/std: . test Message-ID: <20100917185615.15ECB282B9E@codespeak.net> Author: cfbolz Date: Fri Sep 17 20:56:14 2010 New Revision: 77161 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: a bug, but not the one we are looking for Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Fri Sep 17 20:56:14 2010 @@ -273,7 +273,7 @@ class BaseMapdictObject: # slightly evil to make it inherit from W_Root - _mixin_ = True # XXX hack hack hack + _mixin_ = True def _init_empty(self, map): raise NotImplementedError("abstract base class") @@ -318,7 +318,8 @@ w_dict = check_new_dictionary(space, w_dict) w_olddict = self.getdict() assert isinstance(w_dict, W_DictMultiObject) - w_olddict._as_rdict() + if w_olddict.r_dict_content is None: + w_olddict._as_rdict() flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Fri Sep 17 20:56:14 2010 @@ -408,7 +408,6 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap - # ___________________________________________________________ # integration tests @@ -508,6 +507,15 @@ assert a.__dict__ is d assert isinstance(a, B) + def test_dict_devolved_bug(self): + class A(object): + pass + a = A() + a.x = 1 + d = a.__dict__ + d[1] = 3 + a.__dict__ = {} + def test_change_class_slots(self): skip("not supported by pypy yet") class A(object): From hakanardo at codespeak.net Fri Sep 17 22:40:23 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Fri, 17 Sep 2010 22:40:23 +0200 (CEST) Subject: [pypy-svn] r77163 - pypy/trunk/pypy/module/array Message-ID: <20100917204023.B6604282BAD@codespeak.net> Author: hakanardo Date: Fri Sep 17 22:40:22 2010 New Revision: 77163 Modified: pypy/trunk/pypy/module/array/interp_array.py Log: faster tostring() Modified: pypy/trunk/pypy/module/array/interp_array.py ============================================================================== --- pypy/trunk/pypy/module/array/interp_array.py (original) +++ pypy/trunk/pypy/module/array/interp_array.py Fri Sep 17 22:40:22 2010 @@ -528,12 +528,15 @@ def array_tostring__Array(space, self): cbuf = self.charbuf() - s = '' - i = 0 - while i < self.len * mytype.bytes: - s += cbuf[i] - i += 1 + s = ''.join([cbuf[i] for i in xrange(self.len * mytype.bytes)]) return self.space.wrap(s) +## +## s = '' +## i = 0 +## while i < self.len * mytype.bytes: +## s += cbuf[i] +## i += 1 
+## return self.space.wrap(s) def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n): if not isinstance(w_f, W_File): From cfbolz at codespeak.net Sat Sep 18 00:20:11 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sat, 18 Sep 2010 00:20:11 +0200 (CEST) Subject: [pypy-svn] r77164 - in pypy/branch/better-map-instances/pypy/objspace/std: . test Message-ID: <20100917222011.9C345282B9E@codespeak.net> Author: cfbolz Date: Sat Sep 18 00:20:10 2010 New Revision: 77164 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Log: found a bug: when calling a.__dict__.clear(), things can go wrong. Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Sat Sep 18 00:20:10 2010 @@ -70,12 +70,13 @@ oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): oldattr._size_estimate += attr.size_estimate() - oldattr.size_estimate() + assert oldattr.size_estimate() >= oldattr.length() if attr.length() > obj._mapdict_storage_length(): # note that attr.size_estimate() is always at least attr.length() new_storage = [None] * attr.size_estimate() for i in range(obj._mapdict_storage_length()): new_storage[i] = obj._mapdict_read_storage(i) - obj._set_mapdict_storage(new_storage) + obj._set_mapdict_storage_and_map(new_storage, attr) # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses @@ -260,8 +261,7 @@ def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons - w_obj._set_mapdict_map(new_obj.map) - w_obj._set_mapdict_storage(new_obj.storage) + w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) # ____________________________________________________________ # object implementation @@ -279,8 +279,7 @@ raise NotImplementedError("abstract base class") def _become(self, new_obj): - self._set_mapdict_map(new_obj.map) - self._set_mapdict_storage(new_obj.storage) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def _get_mapdict_map(self): return jit.hint(self.map, promote=True) @@ -373,8 +372,9 @@ self.storage[index] = value def _mapdict_storage_length(self): return len(self.storage) - def _set_mapdict_storage(self, storage): + def _set_mapdict_storage_and_map(self, storage, map): self.storage = storage + self.map = map class Object(ObjectMixin, BaseMapdictObject, W_Root): pass # mainly for tests @@ -453,7 +453,8 @@ return len(self._mapdict_get_storage_list()) + n - 1 return n - def _set_mapdict_storage(self, storage): + def _set_mapdict_storage_and_map(self, storage, map): + self.map = map len_storage = len(storage) for i in rangenmin1: if i < len_storage: @@ -462,8 +463,14 @@ erased = rerased.erase(None) setattr(self, "_value%s" % i, erased) if len_storage < n: + assert not self._has_storage_list() erased = rerased.erase(None) elif len_storage == n: + assert not self._has_storage_list() + erased = rerased.erase(storage[nmin1]) + elif not self._has_storage_list(): + # storage is longer than self.map.length() only due to + # overallocation erased = rerased.erase(storage[nmin1]) else: storage_list = storage[nmin1:] Modified: pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py 
============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/test/test_mapdict.py Sat Sep 18 00:20:10 2010 @@ -516,6 +516,27 @@ d[1] = 3 a.__dict__ = {} + def test_dict_clear_bug(self): + class A(object): + pass + a = A() + a.x1 = 1 + a.x2 = 1 + a.x3 = 1 + a.x4 = 1 + a.x5 = 1 + for i in range(100): # change _size_estimate of w_A.terminator + a1 = A() + a1.x1 = 1 + a1.x2 = 1 + a1.x3 = 1 + a1.x4 = 1 + a1.x5 = 1 + d = a.__dict__ + d.clear() + a.__dict__ = {1: 1} + assert d == {} + def test_change_class_slots(self): skip("not supported by pypy yet") class A(object): From cfbolz at codespeak.net Sat Sep 18 00:32:15 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sat, 18 Sep 2010 00:32:15 +0200 (CEST) Subject: [pypy-svn] r77165 - in pypy/branch/better-map-instances/pypy: interpreter objspace/std Message-ID: <20100917223215.00192282B9E@codespeak.net> Author: cfbolz Date: Sat Sep 18 00:32:14 2010 New Revision: 77165 Modified: pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: - add assert - fix translation Modified: pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/better-map-instances/pypy/interpreter/baseobjspace.py Sat Sep 18 00:32:14 2010 @@ -177,7 +177,7 @@ raise NotImplementedError def _mapdict_storage_length(self): raise NotImplementedError - def _set_mapdict_storage(self, storage): + def _set_mapdict_storage_and_map(self, storage, map): raise NotImplementedError Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Sat Sep 18 00:32:14 2010 @@ -472,6 +472,9 @@ # storage is longer than self.map.length() only due to # overallocation erased = rerased.erase(storage[nmin1]) + # in theory, we should be ultra-paranoid and check all entries, + # but checking just one should catch most problems anyway: + assert storage[n + 1] is None else: storage_list = storage[nmin1:] erased = rerased.erase_fixedsizelist(storage_list, W_Root) From benjamin at codespeak.net Sat Sep 18 04:48:39 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 18 Sep 2010 04:48:39 +0200 (CEST) Subject: [pypy-svn] r77166 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100918024839.C9200282B9E@codespeak.net> Author: benjamin Date: Sat Sep 18 04:48:37 2010 New Revision: 77166 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py Log: death to sibling imports Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py Sat Sep 18 
04:48:37 2010 @@ -1,8 +1,8 @@ -from optimizer import Optimizer -from rewrite import OptRewrite -from intbounds import OptIntBounds -from virtualize import OptVirtualize -from heap import OptHeap +from pypy.jit.metainterp.optimizeopt.optimizer import Optimizer +from pypy.jit.metainterp.optimizeopt.rewrite import OptRewrite +from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds +from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize +from pypy.jit.metainterp.optimizeopt.heap import OptHeap def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py Sat Sep 18 04:48:37 2010 @@ -2,7 +2,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated -from optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization class CachedArrayItems(object): def __init__(self): Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Sat Sep 18 04:48:37 2010 @@ -1,4 +1,4 @@ -from optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ IntLowerBound Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 18 04:48:37 2010 @@ -11,7 +11,7 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int -from intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Sat Sep 18 04:48:37 2010 @@ -1,4 +1,4 @@ -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeutil import _findall Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py Sat Sep 18 04:48:37 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated 
-from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * class AbstractVirtualValue(OptValue): From benjamin at codespeak.net Sat Sep 18 04:55:08 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 18 Sep 2010 04:55:08 +0200 (CEST) Subject: [pypy-svn] r77167 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100918025508.E121E282B9E@codespeak.net> Author: benjamin Date: Sat Sep 18 04:55:07 2010 New Revision: 77167 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Log: this default could just as well be None Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 18 04:55:07 2010 @@ -187,7 +187,7 @@ class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True): + def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -199,10 +199,8 @@ self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] - - if len(optimizations) == 0: - self.first_optimization = self - else: + + if optimizations: self.first_optimization = optimizations[0] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] @@ -210,6 +208,8 @@ for o in optimizations: o.optimizer = self o.setup(virtuals) + else: + self.first_optimization = self def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS) From benjamin at codespeak.net Sat Sep 18 04:56:26 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 18 Sep 2010 04:56:26 +0200 (CEST) Subject: [pypy-svn] r77168 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100918025626.5B7B5282B9E@codespeak.net> Author: benjamin Date: Sat Sep 18 04:56:24 2010 New Revision: 77168 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Log: remove comma Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 18 04:56:24 2010 @@ -351,9 +351,9 @@ if op.opnum == rop.GUARD_VALUE: if self.getvalue(op.args[0]) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. - # This is done after the operation is emitted, to let - # store_final_boxes_in_guard set the guard_opnum field - # of the descr to the original rop.GUARD_VALUE. + # This is done after the operation is emitted to let + # store_final_boxes_in_guard set the guard_opnum field of the + # descr to the original rop.GUARD_VALUE. 
constvalue = op.args[1].getint() if constvalue == 0: opnum = rop.GUARD_FALSE From benjamin at codespeak.net Sat Sep 18 05:09:51 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 18 Sep 2010 05:09:51 +0200 (CEST) Subject: [pypy-svn] r77169 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100918030951.390F1282B9E@codespeak.net> Author: benjamin Date: Sat Sep 18 05:09:49 2010 New Revision: 77169 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Log: speling Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Sat Sep 18 05:09:49 2010 @@ -5,7 +5,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation class OptRewrite(Optimization): - """Rewrite operations into equvivialent, cheeper operations. + """Rewrite operations into equvivialent, cheaper operations. This includes already executed operations and constants. """ From cfbolz at codespeak.net Sat Sep 18 10:17:01 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sat, 18 Sep 2010 10:17:01 +0200 (CEST) Subject: [pypy-svn] r77170 - pypy/branch/better-map-instances/pypy/objspace/std Message-ID: <20100918081701.A3C98282B90@codespeak.net> Author: cfbolz Date: Sat Sep 18 10:16:59 2010 New Revision: 77170 Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Log: this is all a bit too subtle: the assert (with n + 1) was wrong. Also call ._has_storage_list() only once Modified: pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py ============================================================================== --- pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py (original) +++ pypy/branch/better-map-instances/pypy/objspace/std/mapdict.py Sat Sep 18 10:16:59 2010 @@ -462,19 +462,20 @@ else: erased = rerased.erase(None) setattr(self, "_value%s" % i, erased) + has_storage_list = self._has_storage_list() if len_storage < n: - assert not self._has_storage_list() + assert not has_storage_list erased = rerased.erase(None) elif len_storage == n: - assert not self._has_storage_list() + assert not has_storage_list erased = rerased.erase(storage[nmin1]) - elif not self._has_storage_list(): + elif not has_storage_list: # storage is longer than self.map.length() only due to # overallocation erased = rerased.erase(storage[nmin1]) # in theory, we should be ultra-paranoid and check all entries, # but checking just one should catch most problems anyway: - assert storage[n + 1] is None + assert storage[n] is None else: storage_list = storage[nmin1:] erased = rerased.erase_fixedsizelist(storage_list, W_Root) From cfbolz at codespeak.net Sat Sep 18 10:33:16 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sat, 18 Sep 2010 10:33:16 +0200 (CEST) Subject: [pypy-svn] r77171 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100918083316.B0333282B90@codespeak.net> Author: cfbolz Date: Sat Sep 18 10:33:15 2010 New Revision: 77171 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Log: 77169 is quite funny Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Sat Sep 18 10:33:15 2010 @@ -5,7 +5,7 @@ 
from pypy.jit.metainterp.resoperation import rop, ResOperation class OptRewrite(Optimization): - """Rewrite operations into equvivialent, cheaper operations. + """Rewrite operations into equivalent, cheaper operations. This includes already executed operations and constants. """ From hakanardo at codespeak.net Sat Sep 18 12:39:43 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Sat, 18 Sep 2010 12:39:43 +0200 (CEST) Subject: [pypy-svn] r77172 - pypy/branch/jit-loop-invaraints Message-ID: <20100918103943.CAB90282B9E@codespeak.net> Author: hakanardo Date: Sat Sep 18 12:39:42 2010 New Revision: 77172 Added: pypy/branch/jit-loop-invaraints/ (props changed) - copied from r77171, pypy/trunk/ Log: Move some loop invariant code out of the loops by placing them in preamble inserted before each call to the loop. From arigo at codespeak.net Sat Sep 18 14:46:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 18 Sep 2010 14:46:24 +0200 (CEST) Subject: [pypy-svn] r77173 - in pypy/trunk/pypy/module/gc: . test Message-ID: <20100918124624.0E56D282B9E@codespeak.net> Author: arigo Date: Sat Sep 18 14:46:23 2010 New Revision: 77173 Added: pypy/trunk/pypy/module/gc/app_referents.py (contents, props changed) pypy/trunk/pypy/module/gc/test/test_app_referents.py (contents, props changed) Modified: pypy/trunk/pypy/module/gc/__init__.py pypy/trunk/pypy/module/gc/referents.py Log: Add app-level logic around rpy_dump_heap() to also accept a file name or a file object. Modified: pypy/trunk/pypy/module/gc/__init__.py ============================================================================== --- pypy/trunk/pypy/module/gc/__init__.py (original) +++ pypy/trunk/pypy/module/gc/__init__.py Sat Sep 18 14:46:23 2010 @@ -17,6 +17,9 @@ def __init__(self, space, w_name): if (not space.config.translating or space.config.translation.gctransformer == "framework"): + self.appleveldefs.update({ + 'dump_rpy_heap': 'app_referents.dump_rpy_heap', + }) self.interpleveldefs.update({ 'get_rpy_roots': 'referents.get_rpy_roots', 'get_rpy_referents': 'referents.get_rpy_referents', @@ -25,7 +28,7 @@ 'get_objects': 'referents.get_objects', 'get_referents': 'referents.get_referents', 'get_referrers': 'referents.get_referrers', - 'dump_rpy_heap': 'referents.dump_rpy_heap', + '_dump_rpy_heap': 'referents._dump_rpy_heap', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) Added: pypy/trunk/pypy/module/gc/app_referents.py ============================================================================== --- (empty file) +++ pypy/trunk/pypy/module/gc/app_referents.py Sat Sep 18 14:46:23 2010 @@ -0,0 +1,29 @@ +# NOT_RPYTHON + +import gc + +def dump_rpy_heap(file): + """Write a full dump of the objects in the heap to the given file + (which can be a file, a file name, or a file descritor). + Format for each object (each item is one machine word): + + [addr] [typeindex] [size] [addr1]..[addrn] [-1] + + where [addr] is the address of the object, [typeindex] and [size] + are as get_rpy_type_index() and get_rpy_memory_usage() would return, + and [addr1]..[addrn] are addresses of other objects that this object + points to. The full dump is a list of such objects, with a marker + [0][0][0][-1] inserted after all GC roots, before all non-roots. 
+ """ + if isinstance(file, str): + f = open(file, 'wb') + gc._dump_rpy_heap(f.fileno()) + f.close() + else: + if isinstance(file, int): + fd = file + else: + if hasattr(file, 'flush'): + file.flush() + fd = file.fileno() + gc._dump_rpy_heap(fd) Modified: pypy/trunk/pypy/module/gc/referents.py ============================================================================== --- pypy/trunk/pypy/module/gc/referents.py (original) +++ pypy/trunk/pypy/module/gc/referents.py Sat Sep 18 14:46:23 2010 @@ -149,20 +149,9 @@ return space.newlist(result_w.keys()) get_referrers.unwrap_spec = [ObjSpace, 'args_w'] -def dump_rpy_heap(space, fd): - """Write a full dump of the objects in the heap to the given file - descriptor. Format for each object (each item is one machine word): - - [addr] [typeindex] [size] [addr1]..[addrn] [-1] - - where [addr] is the address of the object, [typeindex] and [size] - are as get_rpy_type_index() and get_rpy_memory_usage() would return, - and [addr1]..[addrn] are addresses of other objects that this object - points to. The full dump is a list of such objects, with a marker - [0][0][0][-1] inserted after all GC roots, before all non-roots. - """ +def _dump_rpy_heap(space, fd): try: rgc.dump_rpy_heap(fd) except OSError, e: raise wrap_oserror(space, e) -dump_rpy_heap.unwrap_spec = [ObjSpace, int] +_dump_rpy_heap.unwrap_spec = [ObjSpace, int] Added: pypy/trunk/pypy/module/gc/test/test_app_referents.py ============================================================================== --- (empty file) +++ pypy/trunk/pypy/module/gc/test/test_app_referents.py Sat Sep 18 14:46:23 2010 @@ -0,0 +1,39 @@ +import py, os +from pypy.tool.udir import udir + + +def test_interface_to_dump_rpy_heap_str(space): + filename = str(udir.join('dump_rpy_heap.str')) + try: + space.appexec([space.wrap(filename)], """(filename): + import gc + gc.dump_rpy_heap(filename)""") + except NotImplementedError: + pass + assert os.path.exists(filename) + +def test_interface_to_dump_rpy_heap_file(space): + filename = str(udir.join('dump_rpy_heap.file')) + w_f = space.appexec([space.wrap(filename)], """(filename): + import gc + f = open(filename, 'wb') + f.write('X') + return f""") + assert os.path.getsize(filename) == 0 # the 'X' was not flushed yet + try: + space.appexec([w_f], """(f): + import gc + gc.dump_rpy_heap(f)""") + except NotImplementedError: + pass + assert os.path.getsize(filename) == 1 # the 'X' was flushed here + +def test_interface_to_dump_rpy_heap_fd(space): + filename = str(udir.join('dump_rpy_heap.fd')) + f = open(filename, 'wb') + try: + space.appexec([space.wrap(f.fileno())], """(fd): + import gc + gc.dump_rpy_heap(fd)""") + except NotImplementedError: + pass From arigo at codespeak.net Sat Sep 18 15:38:49 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 18 Sep 2010 15:38:49 +0200 (CEST) Subject: [pypy-svn] r77174 - pypy/trunk/pypy/module/_ssl/test Message-ID: <20100918133849.E15D5282B9E@codespeak.net> Author: arigo Date: Sat Sep 18 15:38:48 2010 New Revision: 77174 Modified: pypy/trunk/pypy/module/_ssl/test/test_ssl.py Log: Use https://codespeak.net/ in these tests, instead of another url, which seems sometimes to not support SSL and give obscure errors. 
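For reference, a quick way to sanity-check that a candidate host actually completes an SSL handshake on port 443 before pointing the tests at it (illustrative sketch only, not part of this commit; it uses the old socket.ssl() API available in Python 2, and the helper name is made up):

import socket

def host_supports_ssl(host, port=443):
    # Returns True only if both the TCP connect and the SSL handshake succeed.
    # Note: no timeout is set, so an unreachable host may block for a while.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        try:
            s.connect((host, port))
            socket.ssl(s)   # performs the handshake; raises socket.error on failure
            return True
        except socket.error:
            return False
    finally:
        s.close()

Something like host_supports_ssl("codespeak.net") returning True is the property the new test address is assumed to have.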
Modified: pypy/trunk/pypy/module/_ssl/test/test_ssl.py ============================================================================== --- pypy/trunk/pypy/module/_ssl/test/test_ssl.py (original) +++ pypy/trunk/pypy/module/_ssl/test/test_ssl.py Sat Sep 18 15:38:48 2010 @@ -60,8 +60,8 @@ cls.space = space def setup_method(self, method): - # https://connect.sigen-ca.si/index-en.html - ADDR = "connect.sigen-ca.si", 443 + # https://codespeak.net/ + ADDR = "codespeak.net", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket From arigo at codespeak.net Sat Sep 18 16:32:51 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 18 Sep 2010 16:32:51 +0200 (CEST) Subject: [pypy-svn] r77175 - pypy/branch/gen2-gc/pypy/rpython/memory/gc Message-ID: <20100918143251.5699D282B9E@codespeak.net> Author: arigo Date: Sat Sep 18 16:32:49 2010 New Revision: 77175 Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Log: Hah. Just because we did a measure of "pypy-c translate --rtype targetrpystonedalone" and got as the (visually) best result on a graph the value 1.82. Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py Sat Sep 18 16:32:49 2010 @@ -109,7 +109,7 @@ # the total size consumed; and after every minor collection, if the # total size is now more than 'major_collection_threshold' times, # we trigger the next major collection. - "major_collection_threshold": 1.75, + "major_collection_threshold": 1.82, } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, From arigo at codespeak.net Sat Sep 18 16:35:10 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sat, 18 Sep 2010 16:35:10 +0200 (CEST) Subject: [pypy-svn] r77176 - in pypy/trunk/pypy: config doc/discussion rlib rpython/lltypesystem rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/test translator/c translator/c/test Message-ID: <20100918143510.C8D0F282B9E@codespeak.net> Author: arigo Date: Sat Sep 18 16:35:08 2010 New Revision: 77176 Added: pypy/trunk/pypy/rpython/memory/gc/minimark.py - copied unchanged from r77175, pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py - copied unchanged from r77175, pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py - copied unchanged from r77175, pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimark.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py - copied unchanged from r77175, pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Modified: pypy/trunk/pypy/config/translationoption.py pypy/trunk/pypy/doc/discussion/finalizer-order.txt pypy/trunk/pypy/rlib/rstring.py pypy/trunk/pypy/rpython/lltypesystem/llarena.py pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/generation.py pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py pypy/trunk/pypy/rpython/memory/lltypelayout.py pypy/trunk/pypy/rpython/memory/support.py pypy/trunk/pypy/rpython/memory/test/test_gc.py pypy/trunk/pypy/rpython/memory/test/test_support.py pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py pypy/trunk/pypy/translator/c/funcgen.py pypy/trunk/pypy/translator/c/test/test_newgc.py Log: Merge the branch/gen2-gc containing the minimark GC. 
(Not used by default so far.) Modified: pypy/trunk/pypy/config/translationoption.py ============================================================================== --- pypy/trunk/pypy/config/translationoption.py (original) +++ pypy/trunk/pypy/config/translationoption.py Sat Sep 18 16:35:08 2010 @@ -52,7 +52,7 @@ # gc ChoiceOption("gc", "Garbage Collection Strategy", ["boehm", "ref", "marksweep", "semispace", "statistics", - "generation", "hybrid", "markcompact", "none"], + "generation", "hybrid", "markcompact", "minimark", "none"], "ref", requires={ "ref": [("translation.rweakref", False), # XXX ("translation.gctransformer", "ref")], @@ -65,6 +65,7 @@ "hybrid": [("translation.gctransformer", "framework")], "boehm": [("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], + "minimark": [("translation.gctransformer", "framework")], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", Modified: pypy/trunk/pypy/doc/discussion/finalizer-order.txt ============================================================================== --- pypy/trunk/pypy/doc/discussion/finalizer-order.txt (original) +++ pypy/trunk/pypy/doc/discussion/finalizer-order.txt Sat Sep 18 16:35:08 2010 @@ -133,8 +133,8 @@ that doesn't change the state of an object, we don't follow its children recursively. -In practice we can encode the 4 states with a single extra bit in the -header: +In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode +the 4 states with a single extra bit in the header: ===== ============= ======== ==================== state is_forwarded? bit set? bit set in the copy? @@ -150,3 +150,17 @@ bit in the copy at the end, to clean up before the next collection (which means recursively bumping the state from 2 to 3 in the final loop). + +In the MiniMark GC, the objects don't move (apart from when they are +copied out of the nursery), but we use the flag GCFLAG_VISITED to mark +objects that survive, so we can also have a single extra bit for +finalizers: + + ===== ============== ============================ + state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING + ===== ============== ============================ + 0 no no + 1 no yes + 2 yes yes + 3 yes no + ===== ============= ============================ Modified: pypy/trunk/pypy/rlib/rstring.py ============================================================================== --- pypy/trunk/pypy/rlib/rstring.py (original) +++ pypy/trunk/pypy/rlib/rstring.py Sat Sep 18 16:35:08 2010 @@ -46,7 +46,9 @@ # -------------- public API --------------------------------- -INIT_SIZE = 100 # XXX tweak +# the following number is the maximum size of an RPython unicode +# string that goes into the nursery of the minimark GC. 
+INIT_SIZE = 56 class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Sat Sep 18 16:35:08 2010 @@ -16,8 +16,11 @@ class Arena(object): object_arena_location = {} # {container: (arena, offset)} old_object_arena_location = weakref.WeakKeyDictionary() + _count_arenas = 0 def __init__(self, nbytes, zero): + Arena._count_arenas += 1 + self._arena_index = Arena._count_arenas self.nbytes = nbytes self.usagemap = array.array('c') self.objectptrs = {} # {offset: ptr-to-container} @@ -25,6 +28,9 @@ self.freed = False self.reset(zero) + def __repr__(self): + return '' % (self._arena_index, self.nbytes) + def reset(self, zero, start=0, size=None): self.check() if size is None: @@ -297,6 +303,7 @@ assert isinstance(arena_addr, fakearenaaddress) assert arena_addr.offset == 0 arena_addr.arena.reset(False) + assert not arena_addr.arena.objectptrs arena_addr.arena.freed = True def arena_reset(arena_addr, size, zero): @@ -357,6 +364,11 @@ # This only works with linux's madvise(), which is really not a memory # usage hint but a real command. It guarantees that after MADV_DONTNEED # the pages are cleared again. + + # Note that the trick of the general 'posix' section below, i.e. + # reading /dev/zero, does not seem to have the correct effect of + # lazily-allocating pages on all Linux systems. + from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo _eci = ExternalCompilationInfo(includes=['sys/mman.h']) Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Sat Sep 18 16:35:08 2010 @@ -5,6 +5,7 @@ from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage +from pypy.rlib.rarithmetic import r_uint TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed), ('size', lltype.Signed), @@ -151,7 +152,7 @@ return False def set_max_heap_size(self, size): - pass + raise NotImplementedError def x_swap_pool(self, newpool): return newpool @@ -345,6 +346,7 @@ "generation": "generation.GenerationGC", "hybrid": "hybrid.HybridGC", "markcompact" : "markcompact.MarkCompactGC", + "minimark" : "minimark.MiniMarkGC", } try: modulename, classname = classes[config.translation.gc].split('.') @@ -356,10 +358,12 @@ GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS -def read_from_env(varname): +def _read_float_and_factor_from_env(varname): import os value = os.environ.get(varname) if value: + if len(value) > 1 and value[-1] in 'bB': + value = value[:-1] realvalue = value[:-1] if value[-1] in 'kK': factor = 1024 @@ -371,7 +375,21 @@ factor = 1 realvalue = value try: - return int(float(realvalue) * factor) + return (float(realvalue), factor) except ValueError: pass - return -1 + return (0.0, 0) + +def read_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return int(value * factor) + +def read_uint_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return r_uint(value * factor) + +def 
read_float_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + if factor != 1: + return 0.0 + return value Modified: pypy/trunk/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/generation.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/generation.py Sat Sep 18 16:35:08 2010 @@ -449,7 +449,7 @@ # for the JIT: a minimal description of the write_barrier() method # (the JIT assumes it is of the shape - # "if newvalue.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py Sat Sep 18 16:35:08 2010 @@ -326,6 +326,27 @@ self.gc.collect() assert hash == self.gc.identityhash(self.stackroots[-1]) self.stackroots.pop() + # (6) ask for the hash of varsized objects, larger and larger + for i in range(10): + self.gc.collect() + p = self.malloc(VAR, i) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() + + def test_memory_alignment(self): + A1 = lltype.GcArray(lltype.Char) + for i in range(50): + p1 = self.malloc(A1, i) + if i: + p1[i-1] = chr(i) + self.stackroots.append(p1) + self.gc.collect() + for i in range(1, 50): + p = self.stackroots[-50+i] + assert p[i-1] == chr(i) class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -456,3 +477,13 @@ def test_varsized_from_prebuilt_gc(self): DirectGCTest.test_varsized_from_prebuilt_gc(self) test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + +class TestMiniMarkGCSimple(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + from pypy.rpython.memory.gc.minimark import SimpleArenaCollection + # test the GC itself, providing a simple class for ArenaCollection + GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} + +class TestMiniMarkGCFull(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/trunk/pypy/rpython/memory/lltypelayout.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/lltypelayout.py (original) +++ pypy/trunk/pypy/rpython/memory/lltypelayout.py Sat Sep 18 16:35:08 2010 @@ -7,7 +7,7 @@ primitive_to_fmt = {lltype.Signed: "l", lltype.Unsigned: "L", lltype.Char: "c", - lltype.UniChar: "H", # maybe + lltype.UniChar: "i", # 4 bytes lltype.Bool: "B", lltype.Float: "d", llmemory.Address: "P", Modified: pypy/trunk/pypy/rpython/memory/support.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/support.py (original) +++ pypy/trunk/pypy/rpython/memory/support.py Sat Sep 18 16:35:08 2010 @@ -216,6 +216,24 @@ self.index_in_oldest = index + 1 return result + def foreach(self, callback, arg): + """Invoke 'callback(address, arg)' for all addresses in the deque. + Typically, 'callback' is a bound method and 'arg' can be None. 
+ """ + chunk = self.oldest_chunk + index = self.index_in_oldest + while chunk is not self.newest_chunk: + while index < chunk_size: + callback(chunk.items[index], arg) + index += 1 + chunk = chunk.next + index = 0 + limit = self.index_in_newest + while index < limit: + callback(chunk.items[index], arg) + index += 1 + foreach._annspecialcase_ = 'specialize:arg(1)' + def delete(self): cur = self.oldest_chunk while cur: Modified: pypy/trunk/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gc.py Sat Sep 18 16:35:08 2010 @@ -26,8 +26,9 @@ class GCTest(object): GC_PARAMS = {} GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -451,10 +452,10 @@ a = rgc.malloc_nonmovable(TP, 3) if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_malloc_nonmovable_fixsize(self): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -465,37 +466,36 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_shrink_array(self): from pypy.rpython.lltypesystem.rstr import STR - GC_CAN_SHRINK_ARRAY = self.GC_CAN_SHRINK_ARRAY - def f(n, m): + def f(n, m, gc_can_shrink_array): ptr = lltype.malloc(STR, n) ptr.hash = 0x62 ptr.chars[0] = 'A' ptr.chars[1] = 'B' ptr.chars[2] = 'C' ptr2 = rgc.ll_shrink_array(ptr, 2) - assert (ptr == ptr2) == GC_CAN_SHRINK_ARRAY + assert (ptr == ptr2) == gc_can_shrink_array rgc.collect() return ( ord(ptr2.chars[0]) + (ord(ptr2.chars[1]) << 8) + (len(ptr2.chars) << 16) + (ptr2.hash << 24)) - assert self.interpret(f, [3, 0]) == 0x62024241 - # don't test with larger numbers of top of the Hybrid GC, because - # the default settings make it a too-large varsized object that - # gets allocated outside the semispace - if not isinstance(self, TestHybridGC): - assert self.interpret(f, [12, 0]) == 0x62024241 + flag = self.GC_CAN_SHRINK_ARRAY + assert self.interpret(f, [3, 0, flag]) == 0x62024241 + # with larger numbers, it gets allocated outside the semispace + # with some GCs. 
+ flag = self.GC_CAN_SHRINK_BIG_ARRAY + assert self.interpret(f, [12, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -568,7 +568,7 @@ assert res == 111 def test_writebarrier_before_copy(self): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -628,8 +628,9 @@ class TestSemiSpaceGC(GCTest, snippet.SemiSpaceGCTests): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True + GC_CAN_SHRINK_BIG_ARRAY = True class TestGrowingSemiSpaceGC(TestSemiSpaceGC): GC_PARAMS = {'space_size': 16*WORD} @@ -641,16 +642,15 @@ from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass GC_PARAMS = {'space_size': 65536+16384} GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_BIG_ARRAY = False def test_ref_from_rawmalloced_to_regular(self): import gc @@ -720,7 +720,7 @@ from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass GC_CAN_MOVE = False # with this size of heap, stuff gets allocated # in 3rd gen. - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_PARAMS = {'space_size': 48*WORD, 'min_nursery_size': 12*WORD, 'nursery_size': 12*WORD, @@ -764,3 +764,9 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("Not supported") + + +class TestMiniMarkGC(TestSemiSpaceGC): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_CAN_SHRINK_BIG_ARRAY = False + GC_CAN_MALLOC_NONMOVABLE = True Modified: pypy/trunk/pypy/rpython/memory/test/test_support.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_support.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_support.py Sat Sep 18 16:35:08 2010 @@ -113,6 +113,27 @@ deque.append(x) expected.append(x) + def test_foreach(self): + AddressDeque = get_address_deque(10) + ll = AddressDeque() + for num_entries in range(30, -1, -1): + addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) + for i in range(num_entries)] + for a in addrs: + ll.append(a) + + seen = [] + def callback(addr, fortytwo): + assert fortytwo == 42 + seen.append(addr) + + ll.foreach(callback, 42) + assert seen == addrs + for a in addrs: + b = ll.popleft() + assert a == b + assert not ll.non_empty() + def test_stack_annotate(): AddressStack = get_address_stack(60) Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Sat Sep 18 16:35:08 2010 @@ -47,7 +47,7 @@ gcpolicy = None stacklessgc = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True taggedpointers = False def setup_class(cls): @@ -602,8 +602,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 #except Exception, 
e: # return 2 @@ -611,7 +611,7 @@ def test_malloc_nonmovable(self): run = self.runner("malloc_nonmovable") - assert int(self.GC_CANNOT_MALLOC_NONMOVABLE) == run([]) + assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([]) def define_malloc_nonmovable_fixsize(cls): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -622,8 +622,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -631,7 +631,7 @@ def test_malloc_nonmovable_fixsize(self): run = self.runner("malloc_nonmovable_fixsize") - assert run([]) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE) def define_shrink_array(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -680,7 +680,8 @@ class GenericMovingGCTests(GenericGCTests): GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False + GC_CAN_TEST_ID = False def define_many_ids(cls): class A(object): @@ -710,7 +711,8 @@ return f def test_many_ids(self): - py.test.skip("fails for bad reasons in lltype.py :-(") + if not self.GC_CAN_TEST_ID: + py.test.skip("fails for bad reasons in lltype.py :-(") run = self.runner("many_ids") run([]) @@ -856,7 +858,7 @@ # (and give fixedsize) def define_writebarrier_before_copy(cls): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -1144,10 +1146,6 @@ GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") - class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1379,7 +1377,7 @@ class TestHybridGC(TestGenerationGC): gcname = "hybrid" - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): @@ -1444,6 +1442,21 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("not supported") + +class TestMiniMarkGC(TestHybridGC): + gcname = "minimark" + GC_CAN_TEST_ID = True + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_PARAMS = {'nursery_size': 32*WORD, + 'page_size': 16*WORD, + 'arena_size': 64*WORD, + 'small_request_threshold': 5*WORD, + } + root_stack_depth = 200 + # ________________________________________________________________ # tagged pointers Modified: pypy/trunk/pypy/translator/c/funcgen.py ============================================================================== --- pypy/trunk/pypy/translator/c/funcgen.py (original) +++ pypy/trunk/pypy/translator/c/funcgen.py Sat Sep 18 16:35:08 2010 @@ -733,6 +733,8 @@ continue elif T == Signed: format.append('%ld') + elif T == Unsigned: + format.append('%lu') elif T == Float: format.append('%f') elif isinstance(T, Ptr) or T == Address: Modified: pypy/trunk/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/trunk/pypy/translator/c/test/test_newgc.py (original) +++ pypy/trunk/pypy/translator/c/test/test_newgc.py Sat Sep 18 16:35:08 2010 @@ -19,7 +19,7 @@ removetypeptr = False taggedpointers = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False _isolated_func = None @@ -112,6 +112,7 @@ def 
teardown_class(cls): if hasattr(cls.c_allfuncs, 'close_isolate'): cls.c_allfuncs.close_isolate() + cls.c_allfuncs = None def run(self, name, *args): if not args: @@ -691,8 +692,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -700,7 +701,7 @@ def test_malloc_nonmovable(self): res = self.run('malloc_nonmovable') - assert res == self.GC_CANNOT_MALLOC_NONMOVABLE + assert res == self.GC_CAN_MALLOC_NONMOVABLE def define_resizable_buffer(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -1093,7 +1094,7 @@ gcpolicy = "semispace" should_be_moving = True GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True # for snippets @@ -1252,7 +1253,7 @@ class TestHybridGC(TestGenerationalGC): gcpolicy = "hybrid" should_be_moving = True - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True def test_gc_set_max_heap_size(self): py.test.skip("not implemented") @@ -1323,6 +1324,15 @@ res = self.run("adding_a_hash") assert res == 0 +class TestMiniMarkGC(TestSemiSpaceGC): + gcpolicy = "minimark" + should_be_moving = True + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_ARRAY = True + + def test_gc_heap_stats(self): + py.test.skip("not implemented") + # ____________________________________________________________________ class TaggedPointersTest(object): @@ -1377,3 +1387,6 @@ class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): removetypeptr = True + +class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC): + removetypeptr = True From agaynor at codespeak.net Sat Sep 18 22:48:07 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Sat, 18 Sep 2010 22:48:07 +0200 (CEST) Subject: [pypy-svn] r77177 - in pypy/trunk/pypy/jit/metainterp: optimizeopt test Message-ID: <20100918204807.2A7DE282B9E@codespeak.net> Author: agaynor Date: Sat Sep 18 22:48:05 2010 New Revision: 77177 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Log: Propogate minimum length for strings. 
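To illustrate the idea behind this change with a minimal sketch (these are simplified stand-ins, not the real IntBound classes from optimizeopt): once the optimizer records that the result of STRLEN is >= 0, a later int_ge(i0, 0) is known to be true and its guard_true can be dropped, which is what the new test_bound_strlen below checks.

class Bound(object):
    # simplified stand-in for the optimizer's per-value integer bound info
    def __init__(self):
        self.has_lower = False
        self.lower = 0

    def make_ge_const(self, value):
        # record "this value is known to be >= value"
        if not self.has_lower or value > self.lower:
            self.has_lower = True
            self.lower = value

    def known_ge_const(self, value):
        return self.has_lower and self.lower >= value

# i0 = strlen(p0): optimize_STRLEN (now an alias of optimize_ARRAYLEN_GC)
# attaches a lower bound of 0 to the result ...
strlen_bound = Bound()
strlen_bound.make_ge_const(0)
# ... so "i1 = int_ge(i0, 0); guard_true(i1)" folds to an always-true guard
# and disappears from the optimized loop.
assert strlen_bound.known_ge_const(0)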
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Sat Sep 18 22:48:05 2010 @@ -26,7 +26,7 @@ b = v.intbound if b.has_lower and b.has_upper and b.lower == b.upper: v.make_constant(ConstInt(b.lower)) - + try: op = self.optimizer.producer[box] except KeyError: @@ -36,7 +36,7 @@ if opnum == value: func(self, op) break - + def optimize_GUARD_TRUE(self, op): self.emit_operation(op) self.propagate_bounds_backward(op.args[0]) @@ -48,7 +48,7 @@ v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) self.emit_operation(op) - + r = self.getvalue(op.result) if v2.is_constant(): val = v2.box.getint() @@ -58,14 +58,14 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0,val)) - + def optimize_INT_SUB(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) - + def optimize_INT_ADD(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -94,7 +94,7 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -124,7 +124,7 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_LT(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -172,9 +172,9 @@ self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) - else: + else: self.emit_operation(op) - + def optimize_INT_NE(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -182,22 +182,24 @@ self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - else: + else: self.emit_operation(op) - + def optimize_ARRAYLEN_GC(self, op): self.emit_operation(op) v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(0)) - + + optimize_STRLEN = optimize_ARRAYLEN_GC + def make_int_lt(self, args): v1 = self.getvalue(args[0]) v2 = self.getvalue(args[1]) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(args[0]) if v2.intbound.make_gt(v1.intbound): self.propagate_bounds_backward(args[1]) - + def make_int_le(self, args): v1 = self.getvalue(args[0]) @@ -273,7 +275,7 @@ r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.args[0]) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): self.propagate_bounds_backward(op.args[1]) @@ -284,10 +286,10 @@ r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.args[0]) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.args[1]) def propagate_bounds_INT_MUL(self, op): v1 = self.getvalue(op.args[0]) @@ -295,7 +297,7 @@ r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + 
self.propagate_bounds_backward(op.args[0]) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): self.propagate_bounds_backward(op.args[1]) @@ -306,4 +308,3 @@ optimize_ops = _findall(OptIntBounds, 'optimize_') propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_') - Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Sat Sep 18 22:48:05 2010 @@ -3434,6 +3434,22 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_bound_strlen(self): + ops = """ + [p0] + i0 = strlen(p0) + i1 = int_ge(i0, 0) + guard_true(i1) [] + jump(p0) + """ + # The dead strlen will be eliminated be the backend. + expected = """ + [p0] + i0 = strlen(p0) + jump(p0) + """ + self.optimize_loop(ops, 'Not', expected) + def test_addsub_const(self): ops = """ [i0] From arigo at codespeak.net Mon Sep 20 09:34:11 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:34:11 +0200 (CEST) Subject: [pypy-svn] r77180 - pypy/trunk/pypy/doc/discussion Message-ID: <20100920073411.4C50E282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:34:09 2010 New Revision: 77180 Modified: pypy/trunk/pypy/doc/discussion/finalizer-order.txt Log: Fix ReST. Modified: pypy/trunk/pypy/doc/discussion/finalizer-order.txt ============================================================================== --- pypy/trunk/pypy/doc/discussion/finalizer-order.txt (original) +++ pypy/trunk/pypy/doc/discussion/finalizer-order.txt Mon Sep 20 09:34:09 2010 @@ -163,4 +163,4 @@ 1 no yes 2 yes yes 3 yes no - ===== ============= ============================ + ===== ============== ============================ From fijal at codespeak.net Mon Sep 20 09:35:12 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 20 Sep 2010 09:35:12 +0200 (CEST) Subject: [pypy-svn] r77181 - pypy/benchmarks/own Message-ID: <20100920073512.524D9282BE3@codespeak.net> Author: fijal Date: Mon Sep 20 09:35:10 2010 New Revision: 77181 Added: pypy/benchmarks/own/aes.py pypy/benchmarks/own/crypto_slowaes.py Log: Add crypto_slowaes benchmark. 
Right now not enabled by default Added: pypy/benchmarks/own/aes.py ============================================================================== --- (empty file) +++ pypy/benchmarks/own/aes.py Mon Sep 20 09:35:10 2010 @@ -0,0 +1,635 @@ +#!/usr/bin/python +# +# aes.py: implements AES - Advanced Encryption Standard +# from the SlowAES project, http://code.google.com/p/slowaes/ +# +# Copyright (c) 2008 Josh Davis ( http://www.josh-davis.org ), +# Alex Martelli ( http://www.aleax.it ) +# Copyright (c) 2010 Marti Raudsepp +# +# Ported from C code written by Laurent Haan ( http://www.progressive-coding.com ) +# +# Licensed under the Apache License, Version 2.0 +# http://www.apache.org/licenses/ +# +import os +import sys +import math + +def append_PKCS7_padding(s): + """return s padded to a multiple of 16-bytes by PKCS7 padding""" + numpads = 16 - (len(s)%16) + return s + numpads*chr(numpads) + +def strip_PKCS7_padding(s): + """return s stripped of PKCS7 padding""" + if len(s)%16 or not s: + raise ValueError("String of len %d can't be PCKS7-padded" % len(s)) + numpads = ord(s[-1]) + if numpads > 16: + raise ValueError("String ending with %r can't be PCKS7-padded" % s[-1]) + return s[:-numpads] + +def galois_multiply(a, b): + """Galois multiplication of 8 bit integers a and b.""" + p = 0 + while b: + if b & 1: p ^= a + a <<= 1 + if a & 0x100: + a ^= 0x1b + b >>= 1 + return p & 0xff + +class AES(object): + # valid key sizes + keySize = dict(SIZE_128=16, SIZE_192=24, SIZE_256=32) + + # Rijndael S-box + sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, + 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, + 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, + 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, + 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, + 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, + 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, + 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, + 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, + 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, + 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, + 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, + 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, + 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, + 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, + 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, + 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, + 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, + 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, + 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, + 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, + 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, + 0x54, 0xbb, 0x16] + + # Rijndael Inverted S-box + rsbox = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, + 0x9e, 0x81, 0xf3, 0xd7, 0xfb , 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, + 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb , 0x54, + 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, + 0x42, 0xfa, 0xc3, 0x4e , 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, + 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 , 0x72, 
0xf8, + 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, + 0x65, 0xb6, 0x92 , 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, + 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 , 0x90, 0xd8, 0xab, + 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, + 0x45, 0x06 , 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, + 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b , 0x3a, 0x91, 0x11, 0x41, + 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, + 0x73 , 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, + 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e , 0x47, 0xf1, 0x1a, 0x71, 0x1d, + 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b , + 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, + 0xfe, 0x78, 0xcd, 0x5a, 0xf4 , 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, + 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f , 0x60, + 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, + 0x93, 0xc9, 0x9c, 0xef , 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, + 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 , 0x17, 0x2b, + 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, + 0x21, 0x0c, 0x7d] + + # precompute Galois Field multiplication tables + # we only need these 7 tables for multiplying by 1, 2, 3, 9, 11, 13 and 14 + # see mixColumn function below + + galois_mult_1 = range(256) + galois_mult_2 = [galois_multiply(x, 2) for x in range(256)] + galois_mult_3 = [galois_multiply(x, 3) for x in range(256)] + galois_mult_9 = [galois_multiply(x, 9) for x in range(256)] + galois_mult_11 = [galois_multiply(x, 11) for x in range(256)] + galois_mult_13 = [galois_multiply(x, 13) for x in range(256)] + galois_mult_14 = [galois_multiply(x, 14) for x in range(256)] + + def rotate(self, word): + """ Rijndael's key schedule rotate operation. + + Rotate a word eight bits to the left: eg, rotate(1d2c3a4f) == 2c3a4f1d + Word is an char list of size 4 (32 bits overall). 
+ """ + return word[1:] + word[:1] + + # Rijndael Rcon + Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, + 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, + 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, + 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, + 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, + 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, + 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, + 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, + 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, + 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, + 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, + 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, + 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, + 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, + 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, + 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, + 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, + 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, + 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, + 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, + 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, + 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, + 0xe8, 0xcb ] + + def core(self, word, iteration): + """Key schedule core.""" + # rotate the 32-bit word 8 bits to the left + word = self.rotate(word) + # apply S-Box substitution on all 4 parts of the 32-bit word + for i in xrange(4): + word[i] = self.sbox[word[i]] + # XOR the output of the rcon operation with i to the first part + # (leftmost) only + word[0] = word[0] ^ self.Rcon[iteration] + return word + + def transpose(self, state): + """Transpose a 4x4 matrix, e.g. mirror it by the main diagonal""" + # 0 1 2 3 -> 0 4 8 12 + # 4 5 6 7 -> 1 5 9 13 + # 8 9 10 11 -> 2 6 10 14 + # 12 13 14 15 -> 3 7 11 15 + result = [0] * 16 + + for i in xrange(4): + for j in xrange(4): + result[4*i + j] = state[4*j + i] + + return result + + def expandKey(self, key, size): + """Rijndael's key expansion. + + Expands an 128,192,256 key into an 176,208,240 bytes key + + expandedKey is a char list of large enough size, + key is the non-expanded key. + """ + + nbrRounds = self.get_rounds(size) + + # the expanded keySize + expandedKeySize = 16*(nbrRounds+1) + # current expanded keySize, in bytes + currentSize = 0 + rconIteration = 1 + expandedKey = [0] * expandedKeySize + + # set the 16, 24, 32 bytes of the expanded key to the input key + expandedKey[0:size] = key[0:size] + currentSize += size + + while currentSize < expandedKeySize: + # assign the previous 4 bytes to the temporary value t + t = expandedKey[currentSize-4:currentSize] + + # every 16,24,32 bytes we apply the core schedule to t + # and increment rconIteration afterwards + if currentSize % size == 0: + t = self.core(t, rconIteration) + rconIteration += 1 + # For 256-bit keys, we add an extra sbox to the calculation + if size == self.keySize["SIZE_256"] and ((currentSize % size) == 16): + for l in xrange(4): t[l] = self.sbox[t[l]] + + # We XOR t with the four-byte block 16,24,32 bytes before the new + # expanded key. 
This becomes the next four bytes in the expanded + # key. + for m in xrange(4): + expandedKey[currentSize] = expandedKey[currentSize - size] ^ \ + t[m] + currentSize += 1 + + # each addRoundKey step uses a transposed part of the expanded key. We + # pre-compute this work here instead + for round in range(nbrRounds+1): + roundKey = expandedKey[16*round:16*(round+1)] + roundKey = self.transpose(roundKey) + expandedKey[16*round:16*(round+1)] = roundKey + + return expandedKey + + def addRoundKey(self, state, expandedKey, roundKeyPointer): + """Adds (XORs) the expanded key to the state.""" + + for i in xrange(16): + state[i] ^= expandedKey[roundKeyPointer + i] + + return state + + # + # substitute all the values from the state with the value in the SBox + # using the state value as index for the SBox + # + def subBytes(self, state, isInv): + if isInv: box = self.rsbox + else: box = self.sbox + + state = [box[val] for val in state] + return state + + # iterate over the 4 rows and call shiftRow() with that row + def shiftRows(self, state, isInv): + # 1st row doesn't change + # 2nd row is shifted to left by 1 (wraps around) + # 3rd row is shifted to left by 2 + # 4th row by 3 + # + # 0 1 2 3 -> 0 1 2 3 + # 4 5 6 7 -> 5 6 7 4 + # 8 9 10 11 -> 10 11 8 9 + # 12 13 14 15 -> 15 12 13 14 + if not isInv: + state[ 4: 8] = [state[5], state[6], state[7], state[4]] + state[ 8:12] = [state[10], state[11], state[8], state[9]] + state[12:16] = [state[15], state[12], state[13], state[14]] + else: + state[ 4: 8] = [state[7], state[4], state[5], state[6]] + state[ 8:12] = [state[10], state[11], state[8], state[9]] + state[12:16] = [state[13], state[14], state[15], state[12]] + return state + + # galois multiplication of the 4x4 matrix + def mixColumns(self, state, isInv): + # iterate over the 4 columns + # state[0::4] accesses every 4th value, e.g. state[0], state[4], state[8], state[12] + # that is, the 0th column + + state[0::4] = self.mixColumn(state[0::4], isInv) + state[1::4] = self.mixColumn(state[1::4], isInv) + state[2::4] = self.mixColumn(state[2::4], isInv) + state[3::4] = self.mixColumn(state[3::4], isInv) + + return state + + # galois multiplication of 1 column of the 4x4 matrix + def mixColumn(self, column, isInv): + if isInv: + # choose multiplication tables + # decrypt: v0 = (v0 *14) ^ (v3 * 9) ^ (v2 *13) ^ (v1 *11) + mult0 = self.galois_mult_14 # multiply by 14 + mult1 = self.galois_mult_9 # multiply by 9 + mult2 = self.galois_mult_13 # ... 
+ mult3 = self.galois_mult_11 + else: + # encrypt: v0 = (v0 * 2) ^ (v3 * 1) ^ (v2 * 1) ^ (v1 * 3) + mult0 = self.galois_mult_2 + mult1 = self.galois_mult_1 + mult2 = self.galois_mult_1 + mult3 = self.galois_mult_3 + + v0, v1, v2, v3 = column + + column[0] = mult0[v0] ^ mult1[v3] ^ mult2[v2] ^ mult3[v1] + column[1] = mult0[v1] ^ mult1[v0] ^ mult2[v3] ^ mult3[v2] + column[2] = mult0[v2] ^ mult1[v1] ^ mult2[v0] ^ mult3[v3] + column[3] = mult0[v3] ^ mult1[v2] ^ mult2[v1] ^ mult3[v0] + return column + + # applies the 4 operations of the forward round in sequence + def aes_round(self, state, expandedKey, roundKeyPointer): + state = self.subBytes(state, False) + state = self.shiftRows(state, False) + state = self.mixColumns(state, False) + state = self.addRoundKey(state, expandedKey, roundKeyPointer) + return state + + # applies the 4 operations of the inverse round in sequence + def aes_invRound(self, state, expandedKey, roundKeyPointer): + state = self.shiftRows(state, True) + state = self.subBytes(state, True) + state = self.addRoundKey(state, expandedKey, roundKeyPointer) + state = self.mixColumns(state, True) + return state + + # Perform the initial operations, the standard round, and the final + # operations of the forward aes, creating a round key for each round + def aes_main(self, state, expandedKey, nbrRounds): + state = self.addRoundKey(state, expandedKey, 0) + i = 1 + while i < nbrRounds: + state = self.aes_round(state, expandedKey, 16*i) + i += 1 + state = self.subBytes(state, False) + state = self.shiftRows(state, False) + state = self.addRoundKey(state, expandedKey, 16*nbrRounds) + return state + + # Perform the initial operations, the standard round, and the final + # operations of the inverse aes, creating a round key for each round + def aes_invMain(self, state, expandedKey, nbrRounds): + state = self.addRoundKey(state, expandedKey, 16*nbrRounds) + i = nbrRounds - 1 + while i > 0: + state = self.aes_invRound(state, expandedKey, 16*i) + i -= 1 + state = self.shiftRows(state, True) + state = self.subBytes(state, True) + state = self.addRoundKey(state, expandedKey, 0) + return state + + def get_rounds(self, size): + # set the number of rounds + if size == self.keySize["SIZE_128"]: return 10 + elif size == self.keySize["SIZE_192"]: return 12 + elif size == self.keySize["SIZE_256"]: return 14 + else: return None + + # encrypts a 128 bit input block against the given key of size specified + def encrypt(self, iput, expandedKey, size): + # the number of rounds + nbrRounds = self.get_rounds(size) + + # the 128 bit block to encode + block = self.transpose(iput) + + # encrypt the block using the expandedKey + block = self.aes_main(block, expandedKey, nbrRounds) + + output = self.transpose(block) + return output + + # decrypts a 128 bit input block against the given key of size specified + def decrypt(self, iput, expandedKey, size): + # the number of rounds + nbrRounds = self.get_rounds(size) + + # the 128 bit block to decode + block = self.transpose(iput) + + # decrypt the block using the expandedKey + block = self.aes_invMain(block, expandedKey, nbrRounds) + + output = self.transpose(block) + return output + + +class AESModeOfOperation(object): + + aes = AES() + + # structure of supported modes of operation + modeOfOperation = dict(OFB=0, CFB=1, CBC=2) + + # converts a 16 character string into a number array + def convertString(self, string, start, end, mode): + if end - start > 16: end = start + 16 + if mode == self.modeOfOperation["CBC"]: ar = [0] * 16 + else: ar = [] + + i = start + j 
= 0 + while len(ar) < end - start: + ar.append(0) + while i < end: + ar[j] = ord(string[i]) + j += 1 + i += 1 + return ar + + # Mode of Operation Encryption + # stringIn - Input String + # mode - mode of type modeOfOperation + # hexKey - a hex key of the bit length size + # size - the bit length of the key + # hexIV - the 128 bit hex Initilization Vector + def encrypt(self, stringIn, mode, key, size, IV): + if len(key) % size: + return None + if len(IV) % 16: + return None + # the AES input/output + plaintext = [] + iput = [0] * 16 + output = [] + ciphertext = [0] * 16 + # the output cipher string + cipherOut = [] + + expandedKey = self.aes.expandKey(key, size) + + # char firstRound + firstRound = True + if stringIn != None: + for j in xrange(int(math.ceil(float(len(stringIn))/16))): + start = j*16 + end = j*16+16 + if end > len(stringIn): + end = len(stringIn) + plaintext = self.convertString(stringIn, start, end, mode) + # print 'PT@%s:%s' % (j, plaintext) + if mode == self.modeOfOperation["CFB"]: + if firstRound: + output = self.aes.encrypt(IV, expandedKey, size) + firstRound = False + else: + output = self.aes.encrypt(iput, expandedKey, size) + for i in xrange(16): + if len(plaintext)-1 < i: + ciphertext[i] = 0 ^ output[i] + elif len(output)-1 < i: + ciphertext[i] = plaintext[i] ^ 0 + elif len(plaintext)-1 < i and len(output) < i: + ciphertext[i] = 0 ^ 0 + else: + ciphertext[i] = plaintext[i] ^ output[i] + for k in xrange(end-start): + cipherOut.append(ciphertext[k]) + iput = ciphertext + elif mode == self.modeOfOperation["OFB"]: + if firstRound: + output = self.aes.encrypt(IV, expandedKey, size) + firstRound = False + else: + output = self.aes.encrypt(iput, expandedKey, size) + for i in xrange(16): + if len(plaintext)-1 < i: + ciphertext[i] = 0 ^ output[i] + elif len(output)-1 < i: + ciphertext[i] = plaintext[i] ^ 0 + elif len(plaintext)-1 < i and len(output) < i: + ciphertext[i] = 0 ^ 0 + else: + ciphertext[i] = plaintext[i] ^ output[i] + for k in xrange(end-start): + cipherOut.append(ciphertext[k]) + iput = output + elif mode == self.modeOfOperation["CBC"]: + for i in xrange(16): + if firstRound: + iput[i] = plaintext[i] ^ IV[i] + else: + iput[i] = plaintext[i] ^ ciphertext[i] + # print 'IP@%s:%s' % (j, iput) + firstRound = False + ciphertext = self.aes.encrypt(iput, expandedKey, size) + # always 16 bytes because of the padding for CBC + for k in xrange(16): + cipherOut.append(ciphertext[k]) + return mode, len(stringIn), cipherOut + + # Mode of Operation Decryption + # cipherIn - Encrypted String + # originalsize - The unencrypted string length - required for CBC + # mode - mode of type modeOfOperation + # key - a number array of the bit length size + # size - the bit length of the key + # IV - the 128 bit number array Initilization Vector + def decrypt(self, cipherIn, originalsize, mode, key, size, IV): + # cipherIn = unescCtrlChars(cipherIn) + if len(key) % size: + return None + if len(IV) % 16: + return None + # the AES input/output + ciphertext = [] + iput = [] + output = [] + plaintext = [0] * 16 + # the output plain text string + stringOut = '' + + expandedKey = self.aes.expandKey(key, size) + + # char firstRound + firstRound = True + if cipherIn != None: + for j in xrange(int(math.ceil(float(len(cipherIn))/16))): + start = j*16 + end = j*16+16 + if j*16+16 > len(cipherIn): + end = len(cipherIn) + ciphertext = cipherIn[start:end] + if mode == self.modeOfOperation["CFB"]: + if firstRound: + output = self.aes.encrypt(IV, expandedKey, size) + firstRound = False + else: + output = 
self.aes.encrypt(iput, expandedKey, size) + for i in xrange(16): + if len(output)-1 < i: + plaintext[i] = 0 ^ ciphertext[i] + elif len(ciphertext)-1 < i: + plaintext[i] = output[i] ^ 0 + elif len(output)-1 < i and len(ciphertext) < i: + plaintext[i] = 0 ^ 0 + else: + plaintext[i] = output[i] ^ ciphertext[i] + for k in xrange(end-start): + stringOut += chr(plaintext[k]) + iput = ciphertext + elif mode == self.modeOfOperation["OFB"]: + if firstRound: + output = self.aes.encrypt(IV, expandedKey, size) + firstRound = False + else: + output = self.aes.encrypt(iput, expandedKey, size) + for i in xrange(16): + if len(output)-1 < i: + plaintext[i] = 0 ^ ciphertext[i] + elif len(ciphertext)-1 < i: + plaintext[i] = output[i] ^ 0 + elif len(output)-1 < i and len(ciphertext) < i: + plaintext[i] = 0 ^ 0 + else: + plaintext[i] = output[i] ^ ciphertext[i] + for k in xrange(end-start): + stringOut += chr(plaintext[k]) + iput = output + elif mode == self.modeOfOperation["CBC"]: + output = self.aes.decrypt(ciphertext, expandedKey, size) + for i in xrange(16): + if firstRound: + plaintext[i] = IV[i] ^ output[i] + else: + plaintext[i] = iput[i] ^ output[i] + firstRound = False + if originalsize is not None and originalsize < end: + for k in xrange(originalsize-start): + stringOut += chr(plaintext[k]) + else: + for k in xrange(end-start): + stringOut += chr(plaintext[k]) + iput = ciphertext + return stringOut + + +def encryptData(key, data, mode=AESModeOfOperation.modeOfOperation["CBC"]): + """encrypt `data` using `key` + + `key` should be a string of bytes. + + returned cipher is a string of bytes prepended with the initialization + vector. + + """ + key = map(ord, key) + if mode == AESModeOfOperation.modeOfOperation["CBC"]: + data = append_PKCS7_padding(data) + keysize = len(key) + assert keysize in AES.keySize.values(), 'invalid key size: %s' % keysize + # create a new iv using random data + iv = [ord(i) for i in os.urandom(16)] + moo = AESModeOfOperation() + (mode, length, ciph) = moo.encrypt(data, mode, key, keysize, iv) + # With padding, the original length does not need to be known. It's a bad + # idea to store the original message length. + # prepend the iv. + return ''.join(map(chr, iv)) + ''.join(map(chr, ciph)) + +def decryptData(key, data, mode=AESModeOfOperation.modeOfOperation["CBC"]): + """decrypt `data` using `key` + + `key` should be a string of bytes. + + `data` should have the initialization vector prepended as a string of + ordinal values. + + """ + + key = map(ord, key) + keysize = len(key) + assert keysize in AES.keySize.values(), 'invalid key size: %s' % keysize + # iv is first 16 bytes + iv = map(ord, data[:16]) + data = map(ord, data[16:]) + moo = AESModeOfOperation() + decr = moo.decrypt(data, None, mode, key, keysize, iv) + if mode == AESModeOfOperation.modeOfOperation["CBC"]: + decr = strip_PKCS7_padding(decr) + return decr + +def generateRandomKey(keysize): + """Generates a key from random data of length `keysize`. + + The returned key is a string of bytes. + + """ + if keysize not in (16, 24, 32): + emsg = 'Invalid keysize, %s. Should be one of (16, 24, 32).' + raise ValueError, emsg % keysize + return os.urandom(keysize) + +if __name__ == "__main__": + moo = AESModeOfOperation() + cleartext = "This is a test!" 
+ cypherkey = [143,194,34,208,145,203,230,143,177,246,97,206,145,92,255,84] + iv = [103,35,148,239,76,213,47,118,255,222,123,176,106,134,98,92] + mode, orig_len, ciph = moo.encrypt(cleartext, moo.modeOfOperation["CBC"], + cypherkey, moo.aes.keySize["SIZE_128"], iv) + print 'm=%s, ol=%s (%s), ciph=%s' % (mode, orig_len, len(cleartext), ciph) + decr = moo.decrypt(ciph, orig_len, mode, cypherkey, + moo.aes.keySize["SIZE_128"], iv) + print decr Added: pypy/benchmarks/own/crypto_slowaes.py ============================================================================== --- (empty file) +++ pypy/benchmarks/own/crypto_slowaes.py Mon Sep 20 09:35:10 2010 @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import util +import optparse +import time + +import aes + +cleartext = "This is a test. What could possibly go wrong? " * 2000 # 92000 bytes + +def benchmark(): + moo = aes.AESModeOfOperation() + cypherkey = [143,194,34,208,145,203,230,143,177,246,97,206,145,92,255,84] + iv = [103,35,148,239,76,213,47,118,255,222,123,176,106,134,98,92] + mode, orig_len, ciph = moo.encrypt(cleartext, moo.modeOfOperation["CBC"], + cypherkey, moo.aes.keySize["SIZE_128"], iv) + decr = moo.decrypt(ciph, orig_len, mode, cypherkey, + moo.aes.keySize["SIZE_128"], iv) + + assert decr == cleartext + +def main(arg): + # XXX warmup + + times = [] + for i in xrange(arg): + t0 = time.time() + o = benchmark() + tk = time.time() + times.append(tk - t0) + print tk - t0 + return times + +if __name__ == "__main__": + parser = optparse.OptionParser( + usage="%prog [options]", + description="Test the performance of the SlowAES cipher benchmark") + util.add_standard_options_to(parser) + options, args = parser.parse_args() + + util.run_benchmark(options, options.num_runs, main) From arigo at codespeak.net Mon Sep 20 09:36:26 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:36:26 +0200 (CEST) Subject: [pypy-svn] r77182 - pypy/trunk/pypy/module/sys Message-ID: <20100920073626.2DF43282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:36:24 2010 New Revision: 77182 Modified: pypy/trunk/pypy/module/sys/version.py Log: Add XXXes on the version numbers. Modified: pypy/trunk/pypy/module/sys/version.py ============================================================================== --- pypy/trunk/pypy/module/sys/version.py (original) +++ pypy/trunk/pypy/module/sys/version.py Mon Sep 20 09:36:24 2010 @@ -4,10 +4,11 @@ import os -CPYTHON_VERSION = (2, 5, 2, "beta", 42) -CPYTHON_API_VERSION = 1012 +#XXX # the release serial 42 is not in range(16) +CPYTHON_VERSION = (2, 5, 2, "beta", 42) #XXX # sync patchlevel.h +CPYTHON_API_VERSION = 1012 #XXX # sync with include/modsupport.h -PYPY_VERSION = (1, 3, 0, "beta", '?') +PYPY_VERSION = (1, 3, 0, "beta", '?') #XXX # sync patchlevel.h # the last item is replaced by the svn revision ^^^ TRIM_URL_UP_TO = 'svn/pypy/' From arigo at codespeak.net Mon Sep 20 09:36:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:36:53 +0200 (CEST) Subject: [pypy-svn] r77183 - pypy/trunk/pypy/rpython/memory/gc Message-ID: <20100920073653.0DE1E282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:36:52 2010 New Revision: 77183 Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py Log: Fix the failing tests. 
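For context, a minimal sketch (not the real MiniMarkGC; the function below is a made-up, simplified version) of the check that the one-line diff below fixes: 'reserving_size' must be a plain byte count -- the real code now converts it with llmemory.raw_malloc_usage() -- before it can be added to the memory currently in use and compared against the threshold, which is major_collection_threshold (1.82 by default since r77175) times the memory that survived the previous major collection.

def full_collect_if_needed(total_memory_used, reserving_size,
                           next_major_collection_threshold):
    # reserving_size must already be an integer number of bytes here
    return (float(total_memory_used) + reserving_size >
            next_major_collection_threshold)

# With about 100MB surviving the previous major collection, the threshold is
# roughly 182MB, so:
assert full_collect_if_needed(180 * 1024 * 1024, 4 * 1024 * 1024,
                              182 * 1024 * 1024)
assert not full_collect_if_needed(150 * 1024 * 1024, 4 * 1024 * 1024,
                                  182 * 1024 * 1024)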
Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Mon Sep 20 09:36:52 2010 @@ -358,6 +358,7 @@ def _full_collect_if_needed(self, reserving_size): + reserving_size = llmemory.raw_malloc_usage(reserving_size) if (float(self.get_total_memory_used()) + reserving_size > self.next_major_collection_threshold): self.minor_collection() From arigo at codespeak.net Mon Sep 20 09:37:13 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:37:13 +0200 (CEST) Subject: [pypy-svn] r77184 - in pypy/trunk/pypy: module/gc rlib rpython/lltypesystem rpython/memory/gc rpython/memory/gctransform translator/c/src Message-ID: <20100920073713.52D22282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:37:11 2010 New Revision: 77184 Modified: pypy/trunk/pypy/module/gc/referents.py pypy/trunk/pypy/rlib/rgc.py pypy/trunk/pypy/rpython/lltypesystem/lloperation.py pypy/trunk/pypy/rpython/memory/gc/inspect.py pypy/trunk/pypy/rpython/memory/gctransform/framework.py pypy/trunk/pypy/translator/c/src/mem.h Log: Boehm translation fix. Modified: pypy/trunk/pypy/module/gc/referents.py ============================================================================== --- pypy/trunk/pypy/module/gc/referents.py (original) +++ pypy/trunk/pypy/module/gc/referents.py Mon Sep 20 09:37:11 2010 @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root, Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import ObjSpace -from pypy.interpreter.error import wrap_oserror +from pypy.interpreter.error import wrap_oserror, OperationError from pypy.rlib.objectmodel import we_are_translated @@ -33,8 +33,14 @@ gcref = rgc.cast_instance_to_gcref(w_obj) return gcref +def missing_operation(space): + return OperationError(space.w_NotImplementedError, + space.wrap("operation not implemented by this GC")) + def get_rpy_roots(space): lst = rgc.get_rpy_roots() + if lst is None: + raise missing_operation(space) return space.newlist([wrap(space, gcref) for gcref in lst if gcref]) def get_rpy_referents(space, w_obj): @@ -42,6 +48,8 @@ This is likely to contain a lot of GcRefs.""" gcref = unwrap(space, w_obj) lst = rgc.get_rpy_referents(gcref) + if lst is None: + raise missing_operation(space) return space.newlist([wrap(space, gcref) for gcref in lst]) def get_rpy_memory_usage(space, w_obj): @@ -49,6 +57,8 @@ This does not include the internal structures of the object.""" gcref = unwrap(space, w_obj) size = rgc.get_rpy_memory_usage(gcref) + if size < 0: + raise missing_operation(space) return space.wrap(size) def get_rpy_type_index(space, w_obj): @@ -57,6 +67,8 @@ file typeids.txt produced at translation.""" gcref = unwrap(space, w_obj) index = rgc.get_rpy_type_index(gcref) + if index < 0: + raise missing_operation(space) return space.wrap(index) def _list_w_obj_referents(gcref, result_w): @@ -151,7 +163,9 @@ def _dump_rpy_heap(space, fd): try: - rgc.dump_rpy_heap(fd) + ok = rgc.dump_rpy_heap(fd): except OSError, e: raise wrap_oserror(space, e) + if not ok: + raise missing_operation(space) _dump_rpy_heap.unwrap_spec = [ObjSpace, int] Modified: pypy/trunk/pypy/rlib/rgc.py ============================================================================== --- pypy/trunk/pypy/rlib/rgc.py (original) +++ pypy/trunk/pypy/rlib/rgc.py Mon Sep 20 09:37:11 2010 @@ -517,8 +517,8 @@ class Entry(ExtRegistryEntry): 
_about_ = dump_rpy_heap def compute_result_annotation(self, s_fd): - from pypy.annotation.model import s_None - return s_None + from pypy.annotation.model import s_Bool + return s_Bool def specialize_call(self, hop): vlist = hop.inputargs(lltype.Signed) hop.exception_is_here() Modified: pypy/trunk/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lloperation.py Mon Sep 20 09:37:11 2010 @@ -467,6 +467,13 @@ 'gc_writebarrier_before_copy': LLOp(canrun=True), 'gc_heap_stats' : LLOp(canunwindgc=True), + 'gc_get_rpy_roots' : LLOp(), + 'gc_get_rpy_referents': LLOp(), + 'gc_get_rpy_memory_usage': LLOp(), + 'gc_get_rpy_type_index': LLOp(), + 'gc_is_rpy_instance' : LLOp(), + 'gc_dump_rpy_heap' : LLOp(), + # ------- JIT & GC interaction, only for some GCs ---------- 'gc_adr_of_nursery_free' : LLOp(), Modified: pypy/trunk/pypy/rpython/memory/gc/inspect.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/inspect.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/inspect.py Mon Sep 20 09:37:11 2010 @@ -197,3 +197,4 @@ heapdumper.walk(heapdumper.pending) heapdumper.flush() heapdumper.delete() + return True Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Mon Sep 20 09:37:11 2010 @@ -412,7 +412,7 @@ minimal_transform=False) self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, [s_gc, annmodel.SomeInteger()], - annmodel.s_None, + annmodel.s_Bool, minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, Modified: pypy/trunk/pypy/translator/c/src/mem.h ============================================================================== --- pypy/trunk/pypy/translator/c/src/mem.h (original) +++ pypy/trunk/pypy/translator/c/src/mem.h Mon Sep 20 09:37:11 2010 @@ -224,3 +224,13 @@ #define OP_CAST_PTR_TO_WEAKREFPTR(x, r) r = x #define OP_CAST_WEAKREFPTR_TO_PTR(x, r) r = x + +/************************************************************/ +/* dummy version of these operations, e.g. 
with Boehm */ + +#define OP_GC_GET_RPY_ROOTS(r) r = 0 +#define OP_GC_GET_RPY_REFERENTS(x, r) r = 0 +#define OP_GC_GET_RPY_MEMORY_USAGE(x, r) r = -1 +#define OP_GC_GET_RPY_TYPE_INDEX(x, r) r = -1 +#define OP_GC_IS_RPY_INSTANCE(x, r) r = 0 +#define OP_GC_DUMP_RPY_HEAP(r) r = 0 From arigo at codespeak.net Mon Sep 20 09:48:08 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:48:08 +0200 (CEST) Subject: [pypy-svn] r77185 - in pypy/trunk/pypy: rlib rpython rpython/lltypesystem rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test Message-ID: <20100920074808.2281D282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:48:06 2010 New Revision: 77185 Modified: pypy/trunk/pypy/rlib/rarithmetic.py pypy/trunk/pypy/rpython/llinterp.py pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py pypy/trunk/pypy/rpython/lltypesystem/llarena.py pypy/trunk/pypy/rpython/lltypesystem/llheap.py pypy/trunk/pypy/rpython/lltypesystem/llmemory.py pypy/trunk/pypy/rpython/lltypesystem/lloperation.py pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py pypy/trunk/pypy/rpython/memory/gctransform/framework.py pypy/trunk/pypy/rpython/memory/gcwrapper.py pypy/trunk/pypy/rpython/memory/test/test_gc.py pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Log: Merge branch/gc-cardmark, adding card marking to the minimark GC. (the branch was only local to my working copy.) Modified: pypy/trunk/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/trunk/pypy/rlib/rarithmetic.py (original) +++ pypy/trunk/pypy/rlib/rarithmetic.py Mon Sep 20 09:48:06 2010 @@ -50,6 +50,11 @@ LONG_MASK = _Ltest*2-1 LONG_TEST = _Ltest +LONG_BIT_SHIFT = 0 +while (1 << LONG_BIT_SHIFT) != LONG_BIT: + LONG_BIT_SHIFT += 1 + assert LONG_BIT_SHIFT < 99, "LONG_BIT_SHIFT value not found?" 
+ INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY Modified: pypy/trunk/pypy/rpython/llinterp.py ============================================================================== --- pypy/trunk/pypy/rpython/llinterp.py (original) +++ pypy/trunk/pypy/rpython/llinterp.py Mon Sep 20 09:48:06 2010 @@ -650,7 +650,7 @@ offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] inneraddr, FIELD = self.getinneraddr(obj, *offsets) if FIELD is not lltype.Void: - self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue) + self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets) def op_bare_setinteriorfield(self, obj, *fieldnamesval): offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Mon Sep 20 09:48:06 2010 @@ -353,6 +353,7 @@ """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" assert not isinstance(instance, _parentable_mixin) # not yet + ctypes_storage._preserved_hash = hash(instance) subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls instance._storage = ctypes_storage @@ -402,6 +403,8 @@ def __hash__(self): if self._storage is not None: + if hasattr(self._storage, '_preserved_hash'): + return self._storage._preserved_hash return ctypes.addressof(self._storage) else: return object.__hash__(self) Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Mon Sep 20 09:48:06 2010 @@ -69,7 +69,7 @@ raise ArenaError("Address offset is outside the arena") return fakearenaaddress(self, offset) - def allocate_object(self, offset, size): + def allocate_object(self, offset, size, letter='x'): self.check() bytes = llmemory.raw_malloc_usage(size) if offset + bytes > self.nbytes: @@ -84,7 +84,7 @@ raise ArenaError("new object overlaps a previous object") assert offset not in self.objectptrs addr2 = size._raw_malloc([], zero=zero) - pattern = 'X' + 'x'*(bytes-1) + pattern = letter.upper() + letter*(bytes-1) self.usagemap[offset:offset+bytes] = array.array('c', pattern) self.setobject(addr2, offset, bytes) # common case: 'size' starts with a GCHeaderOffset. In this case @@ -324,10 +324,13 @@ this is used to know what type of lltype object to allocate.""" from pypy.rpython.memory.lltypelayout import memory_alignment addr = getfakearenaaddress(addr) - if check_alignment and (addr.offset & (memory_alignment-1)) != 0: + letter = 'x' + if llmemory.raw_malloc_usage(size) == 1: + letter = 'b' # for Byte-aligned allocations + elif check_alignment and (addr.offset & (memory_alignment-1)) != 0: raise ArenaError("object at offset %d would not be correctly aligned" % (addr.offset,)) - addr.arena.allocate_object(addr.offset, size) + addr.arena.allocate_object(addr.offset, size, letter) def arena_shrink_obj(addr, newsize): """ Mark object as shorter than it was @@ -471,6 +474,7 @@ sandboxsafe=True) def llimpl_arena_free(arena_addr): + # NB. minimark.py assumes that arena_free() is actually just a raw_free(). 
llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', llimpl=llimpl_arena_free, Modified: pypy/trunk/pypy/rpython/lltypesystem/llheap.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llheap.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llheap.py Mon Sep 20 09:48:06 2010 @@ -8,7 +8,8 @@ from pypy.rlib.rgc import collect from pypy.rlib.rgc import can_move -def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue): +def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=None): assert typeOf(newvalue) == INNERTYPE # xxx access the address object's ref() directly for performance inneraddr.ref()[0] = newvalue Modified: pypy/trunk/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llmemory.py Mon Sep 20 09:48:06 2010 @@ -409,6 +409,9 @@ if self.ptr is None: s = 'NULL' else: + #try: + # s = hex(self.ptr._cast_to_int()) + #except: s = str(self.ptr) return '' % (s,) Modified: pypy/trunk/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/lloperation.py Mon Sep 20 09:48:06 2010 @@ -436,6 +436,7 @@ 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), 'do_malloc_varsize_clear': LLOp(canraise=(MemoryError,),canunwindgc=True), 'get_write_barrier_failing_case': LLOp(sideeffects=False), + 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False), 'gc_get_type_info_group': LLOp(sideeffects=False), # __________ GC operations __________ Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Mon Sep 20 09:48:06 2010 @@ -200,6 +200,39 @@ length -= 1 trace._annspecialcase_ = 'specialize:arg(2)' + def trace_partial(self, obj, start, stop, callback, arg): + """Like trace(), but only walk the array part, for indices in + range(start, stop). Must only be called if has_gcptr_in_varsize(). 
+ """ + length = stop - start + typeid = self.get_type_id(obj) + if self.is_gcarrayofgcptr(typeid): + # a performance shortcut for GcArray(gcptr) + item = obj + llmemory.gcarrayofptr_itemsoffset + item += llmemory.gcarrayofptr_singleitemoffset * start + while length > 0: + if self.points_to_valid_gc_object(item): + callback(item, arg) + item += llmemory.gcarrayofptr_singleitemoffset + length -= 1 + return + ll_assert(self.has_gcptr_in_varsize(typeid), + "trace_partial() on object without has_gcptr_in_varsize()") + item = obj + self.varsize_offset_to_variable_part(typeid) + offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) + itemlength = self.varsize_item_sizes(typeid) + item += itemlength * start + while length > 0: + j = 0 + while j < len(offsets): + itemobj = item + offsets[j] + if self.points_to_valid_gc_object(itemobj): + callback(itemobj, arg) + j += 1 + item += itemlength + length -= 1 + trace_partial._annspecialcase_ = 'specialize:arg(4)' + def points_to_valid_gc_object(self, addr): return self.is_valid_gc_object(addr.address[0]) Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Mon Sep 20 09:48:06 2010 @@ -5,8 +5,10 @@ from pypy.rpython.memory.gc import minimarkpage, base, generation from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint +from pypy.rlib.rarithmetic import LONG_BIT_SHIFT from pypy.rlib.debug import ll_assert, debug_print, debug_start, debug_stop from pypy.rlib.objectmodel import we_are_translated +from pypy.tool.sourcetools import func_with_new_name WORD = LONG_BIT // 8 NULL = llmemory.NULL @@ -40,6 +42,12 @@ # collection. See pypy/doc/discussion/finalizer-order.txt GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4 +# The following flag is set on externally raw_malloc'ed arrays of pointers. +# They are allocated with some extra space in front of them for a bitfield, +# one bit per 'card_page_indices' indices. +GCFLAG_HAS_CARDS = first_gcflag << 5 +GCFLAG_CARDS_SET = first_gcflag << 6 # <- at least one card bit is set + FORWARDSTUB = lltype.GcStruct('forwarding_stub', ('forw', llmemory.Address)) @@ -110,6 +118,14 @@ # total size is now more than 'major_collection_threshold' times, # we trigger the next major collection. "major_collection_threshold": 1.82, + + # The number of array indices that are mapped to a single bit in + # write_barrier_from_array(). Must be a power of two. The default + # value of 128 means that card pages are 512 bytes (1024 on 64-bits) + # in regular arrays of pointers; more in arrays whose items are + # larger. A value of 0 disables card marking. 
+ "card_page_indices": 128, + "card_page_indices_min": 800, # minimum number of indices for cards } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, @@ -119,6 +135,8 @@ arena_size=64*WORD, small_request_threshold=5*WORD, major_collection_threshold=2.5, + card_page_indices=0, + card_page_indices_min=None, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) assert small_request_threshold % WORD == 0 @@ -130,6 +148,13 @@ self.max_heap_size = 0.0 self.max_heap_size_already_raised = False # + self.card_page_indices = card_page_indices + if self.card_page_indices > 0: + self.card_page_indices_min = card_page_indices_min + self.card_page_shift = 0 + while (1 << self.card_page_shift) < self.card_page_indices: + self.card_page_shift += 1 + # self.nursery = NULL self.nursery_free = NULL self.nursery_top = NULL @@ -145,6 +170,13 @@ # the write barrier. self.old_objects_pointing_to_young = self.AddressStack() # + # Similar to 'old_objects_pointing_to_young', but lists objects + # that have the GCFLAG_CARDS_SET bit. For large arrays. Note + # that it is possible for an object to be listed both in here + # and in 'old_objects_pointing_to_young', in which case we + # should just clear the cards and trace it fully, as usual. + self.old_objects_with_cards_set = self.AddressStack() + # # A list of all prebuilt GC objects that contain pointers to the heap self.prebuilt_root_objects = self.AddressStack() # @@ -292,7 +324,7 @@ # If totalsize is greater than small_request_threshold, ask for # a rawmalloc. if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - result = self._external_malloc(typeid, totalsize) + result = self._external_malloc_cardmark(typeid, totalsize, length) # else: # Round the size up to the next multiple of WORD. Note that @@ -364,51 +396,85 @@ self.minor_collection() self.major_collection(reserving_size) - def _reserve_external_memory(self, totalsize): - """Do a raw_malloc() to get some external memory. - Note that the returned memory is not cleared.""" - # - result = llmemory.raw_malloc(totalsize) - if not result: - raise MemoryError("cannot allocate large object") - # - size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_objects.append(result + size_gc_header) - return result - def _external_malloc(self, typeid, totalsize): """Allocate a large object using raw_malloc().""" + return self._external_malloc_cardmark(typeid, totalsize, 0) + + + def _external_malloc_cardmark(self, typeid, totalsize, length): + """Allocate a large object using raw_malloc(), possibly as an + object with card marking enabled, if its length is large enough. + 'length' can be specified as 0 if the object is not varsized.""" # - # If somebody calls _external_malloc() a lot, we must eventually + # If somebody calls this function a lot, we must eventually # force a full collection. self._full_collect_if_needed(totalsize) # - result = self._reserve_external_memory(totalsize) - llmemory.raw_memclear(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) + # Check if we need to introduce the card marker bits area. + if (self.card_page_indices <= 0 # <- this check is constant-folded + or length < self.card_page_indices_min # <- must be large enough + or not self.has_gcptr_in_varsize(typeid)): # <- must contain ptrs + # + # In these cases, we don't want a card marker bits area. 
+ cardheadersize = 0 + extra_flags = 0 + # + else: + # Reserve N extra words containing card bits before the object. + extra_words = self.card_marking_words_for_length(length) + cardheadersize = WORD * extra_words + extra_flags = GCFLAG_HAS_CARDS + # + allocsize = cardheadersize + llmemory.raw_malloc_usage(totalsize) + # + # Allocate the object using arena_malloc(), which we assume here + # is just the same as raw_malloc(), but allows the extra flexibility + # of saying that we have extra words in the header. + arena = llarena.arena_malloc(allocsize, False) + if not arena: + raise MemoryError("cannot allocate large object") + # + # Clear it using method 2 of llarena.arena_reset(), which is the + # same as just a raw_memclear(). + llarena.arena_reset(arena, allocsize, 2) + # + # Reserve the card mark as a list of single bytes + # (the loop is empty in C). + i = 0 + while i < cardheadersize: + llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) + i += 1 + # + # Initialize the object. + result = arena + cardheadersize + llarena.arena_reserve(result, totalsize) + self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) + # + # Record the newly allocated object and its size. + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(result + size_gc_header) return result - _external_malloc._dont_inline_ = True + _external_malloc_cardmark._dont_inline_ = True def _malloc_nonmovable(self, typeid, totalsize): """Allocate an object non-movable.""" # - # If somebody calls _malloc_nonmovable() a lot, we must eventually - # force a full collection. - self._full_collect_if_needed(totalsize) - # rawtotalsize = llmemory.raw_malloc_usage(totalsize) - if rawtotalsize <= self.small_request_threshold: - # - # Ask the ArenaCollection to do the malloc. - totalsize = llarena.round_up_for_allocation(totalsize) - result = self.ac.malloc(totalsize) + if rawtotalsize > self.small_request_threshold: # - else: # The size asked for is too large for the ArenaCollection. - result = self._reserve_external_memory(totalsize) + return self._external_malloc(typeid, totalsize) + # + totalsize = llarena.round_up_for_allocation(totalsize) + # + # If somebody calls _malloc_nonmovable() a lot, we must eventually + # force a full collection. + self._full_collect_if_needed(totalsize) # + # Ask the ArenaCollection to do the malloc. + result = self.ac.malloc(totalsize) llmemory.raw_memclear(result, totalsize) self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) return result @@ -446,6 +512,7 @@ # Only objects in the nursery can be "resized". Resizing them # means recording that they have a smaller size, so that when # moved out of the nursery, they will consume less memory. + # In particular, an array with GCFLAG_HAS_CARDS is never resized. 
if not self.is_in_nursery(obj): return False # @@ -492,7 +559,7 @@ gcref = self.malloc_varsize_nonmovable(typeid, length) else: gcref = self.malloc_fixedsize_nonmovable(typeid) - return gcref + return llmemory.cast_ptr_to_adr(gcref) # ---------- @@ -540,6 +607,24 @@ """ return self.ac.total_memory_used + self.rawmalloced_total_size + def card_marking_words_for_length(self, length): + # --- Unoptimized version: + #num_bits = ((length-1) >> self.card_page_shift) + 1 + #return (num_bits + (LONG_BIT - 1)) >> LONG_BIT_SHIFT + # --- Optimized version: + return intmask( + ((r_uint(length) + ((LONG_BIT << self.card_page_shift) - 1)) >> + (self.card_page_shift + LONG_BIT_SHIFT))) + + def card_marking_bytes_for_length(self, length): + # --- Unoptimized version: + #num_bits = ((length-1) >> self.card_page_shift) + 1 + #return (num_bits + 7) >> 3 + # --- Optimized version: + return intmask( + ((r_uint(length) + ((8 << self.card_page_shift) - 1)) >> + (self.card_page_shift + 3))) + def debug_check_object(self, obj): # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), @@ -557,6 +642,30 @@ # the GCFLAG_FINALIZATION_ORDERING should not be set between coll. ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0, "unexpected GCFLAG_FINALIZATION_ORDERING") + # the GCFLAG_CARDS_SET should not be set between collections + ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET == 0, + "unexpected GCFLAG_CARDS_SET") + # if the GCFLAG_HAS_CARDS is set, check that all bits are zero now + if self.header(obj).tid & GCFLAG_HAS_CARDS: + ll_assert(self.card_page_indices > 0, + "GCFLAG_HAS_CARDS but not using card marking") + typeid = self.get_type_id(obj) + ll_assert(self.has_gcptr_in_varsize(typeid), + "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") + ll_assert(self.header(obj).tid & GCFLAG_NO_HEAP_PTRS == 0, + "GCFLAG_HAS_CARDS && GCFLAG_NO_HEAP_PTRS") + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + extra_words = self.card_marking_words_for_length(length) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + p = llarena.getfakearenaaddress(obj - size_gc_header) + i = extra_words * WORD + while i > 0: + p -= 1 + ll_assert(p.char[0] == '\x00', + "the card marker bits are not cleared") + i -= 1 # ---------- # Write barrier @@ -570,6 +679,14 @@ if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: self.remember_young_pointer(addr_struct, newvalue) + def write_barrier_from_array(self, newvalue, addr_array, index): + if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS: + if self.card_page_indices > 0: # <- constant-folded + self.remember_young_pointer_from_array(addr_array, index, + newvalue) + else: + self.remember_young_pointer(addr_array, newvalue) + def _init_writebarrier_logic(self): # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. @@ -614,6 +731,63 @@ remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer + # + if self.card_page_indices > 0: + self._init_writebarrier_with_card_marker() + + + def _init_writebarrier_with_card_marker(self): + def remember_young_pointer_from_array(addr_array, index, addr): + # 'addr_array' is the address of the object in which we write, + # which must have an array part; 'index' is the index of the + # item that is (or contains) the pointer that we write; + # 'addr' is the address that we write in the array. 
+ objhdr = self.header(addr_array) + if objhdr.tid & GCFLAG_HAS_CARDS == 0: + # + # no cards, use default logic. The 'nocard_logic()' is just + # 'remember_young_pointer()', but forced to be inlined here. + nocard_logic(addr_array, addr) + return + # + # 'addr_array' is a raw_malloc'ed array with card markers + # in front. Compute the index of the bit to set: + bitindex = index >> self.card_page_shift + byteindex = bitindex >> 3 + bitmask = 1 << (bitindex & 7) + # + # If the bit is already set, leave now. + size_gc_header = self.gcheaderbuilder.size_gc_header + addr_byte = addr_array - size_gc_header + addr_byte = llarena.getfakearenaaddress(addr_byte) + (~byteindex) + byte = ord(addr_byte.char[0]) + if byte & bitmask: + return + # + # As in remember_young_pointer, check if 'addr' is a valid + # pointer, in case it can be a tagged integer + if (self.config.taggedpointers and + not self.is_valid_gc_object(addr)): + return + # + # If the 'addr' is in the nursery, then we need to set the flag. + # Note that the following check is done after the bit check + # above, because it is expected that the "bit already set" + # situation is the most common. + if self.is_in_nursery(addr): + addr_byte.char[0] = chr(byte | bitmask) + # + if objhdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET + + nocard_logic = func_with_new_name(self.remember_young_pointer, + 'remember_young_pointer_nocard') + del nocard_logic._dont_inline_ + nocard_logic._always_inline_ = True + remember_young_pointer_from_array._dont_inline_ = True + self.remember_young_pointer_from_array = ( + remember_young_pointer_from_array) def assume_young_pointers(self, addr_struct): @@ -641,7 +815,8 @@ return True # ^^^ a fast path of write-barrier # - if source_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0: + if (source_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0 or + source_hdr.tid & GCFLAG_CARDS_SET != 0): # there might be an object in source that is in nursery self.old_objects_pointing_to_young.append(dest_addr) dest_hdr.tid &= ~GCFLAG_NO_YOUNG_PTRS @@ -670,6 +845,11 @@ # 'old_objects_pointing_to_young'. self.collect_roots_in_nursery() # + # If we are using card marking, do a partial trace of the arrays + # that are flagged with GCFLAG_CARDS_SET. + if self.card_page_indices > 0: + self.collect_cardrefs_to_nursery() + # # Now trace objects from 'old_objects_pointing_to_young'. # All nursery objects they reference are copied out of the # nursery, and again added to 'old_objects_pointing_to_young'. @@ -707,6 +887,61 @@ MiniMarkGC._trace_drag_out1, # static in prebuilt non-gc None) # static in prebuilt gc + def collect_cardrefs_to_nursery(self): + size_gc_header = self.gcheaderbuilder.size_gc_header + oldlist = self.old_objects_with_cards_set + while oldlist.non_empty(): + obj = oldlist.pop() + # + # Remove the GCFLAG_CARDS_SET flag. + ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET != 0, + "!GCFLAG_CARDS_SET but object in 'old_objects_with_cards_set'") + self.header(obj).tid &= ~GCFLAG_CARDS_SET + # + # Get the number of card marker bytes in the header. 
+ typeid = self.get_type_id(obj) + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + bytes = self.card_marking_bytes_for_length(length) + p = llarena.getfakearenaaddress(obj - size_gc_header) + # + # If the object doesn't have GCFLAG_NO_YOUNG_PTRS, then it + # means that it is in 'old_objects_pointing_to_young' and + # will be fully traced by collect_oldrefs_to_nursery() just + # afterwards. + if self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS == 0: + # + # In that case, we just have to reset all card bits. + while bytes > 0: + p -= 1 + p.char[0] = '\x00' + bytes -= 1 + # + else: + # Walk the bytes encoding the card marker bits, and for + # each bit set, call trace_and_drag_out_of_nursery_partial(). + interval_start = 0 + while bytes > 0: + p -= 1 + cardbyte = ord(p.char[0]) + p.char[0] = '\x00' # reset the bits + bytes -= 1 + next_byte_start = interval_start + 8*self.card_page_indices + # + while cardbyte != 0: + interval_stop = interval_start + self.card_page_indices + # + if cardbyte & 1: + if interval_stop > length: + interval_stop = length + self.trace_and_drag_out_of_nursery_partial( + obj, interval_start, interval_stop) + # + interval_start = interval_stop + cardbyte >>= 1 + interval_start = next_byte_start + + def collect_oldrefs_to_nursery(self): # Follow the old_objects_pointing_to_young list and move the # young objects they point to out of the nursery. @@ -729,6 +964,15 @@ """ self.trace(obj, self._trace_drag_out, None) + def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): + """Like trace_and_drag_out_of_nursery(), but limited to the array + indices in range(start, stop). + """ + ll_assert(start < stop, "empty or negative range " + "in trace_and_drag_out_of_nursery_partial()") + #print 'trace_partial:', start, stop, '\t', obj + self.trace_partial(obj, start, stop, self._trace_drag_out, None) + def _trace_drag_out1(self, root): self._trace_drag_out(root, None) @@ -917,7 +1161,22 @@ totalsize = size_gc_header + self.get_size(obj) rawtotalsize = llmemory.raw_malloc_usage(totalsize) self.rawmalloced_total_size -= rawtotalsize - llmemory.raw_free(obj - size_gc_header) + arena = llarena.getfakearenaaddress(obj - size_gc_header) + # + # Must also include the card marker area, if any + if (self.card_page_indices > 0 # <- this is constant-folded + and self.header(obj).tid & GCFLAG_HAS_CARDS): + # + # Get the length and compute the number of extra bytes + typeid = self.get_type_id(obj) + ll_assert(self.has_gcptr_in_varsize(typeid), + "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + extra_words = self.card_marking_words_for_length(length) + arena -= extra_words * WORD + # + llarena.arena_free(arena) # list.delete() Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py Mon Sep 20 09:48:06 2010 @@ -95,7 +95,10 @@ if self.gc.needs_write_barrier: newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + if hasattr(self.gc, 'write_barrier_from_array'): + self.gc.write_barrier_from_array(newaddr, addr_struct, index) + else: + self.gc.write_barrier(newaddr, addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -485,5 +488,27 
@@ # test the GC itself, providing a simple class for ArenaCollection GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} + def test_card_marker(self): + for arraylength in (range(4, 17) + + [69] # 3 bytes + + [300]): # 10 bytes + print 'array length:', arraylength + nums = {} + a = self.malloc(VAR, arraylength) + self.stackroots.append(a) + for i in range(50): + p = self.malloc(S) + p.x = -i + a = self.stackroots[-1] + index = (i*i) % arraylength + self.writearray(a, index, p) + nums[index] = p.x + # + for index, expected_x in nums.items(): + assert a[index].x == expected_x + self.stackroots.pop() + test_card_marker.GC_PARAMS = {"card_page_indices": 4, + "card_page_indices_min": 7} + class TestMiniMarkGCFull(DirectGCTest): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py Mon Sep 20 09:48:06 2010 @@ -1,5 +1,6 @@ from pypy.rpython.lltypesystem import llmemory from pypy.rpython.memory.gc.minimark import MiniMarkGC +from pypy.rlib.rarithmetic import LONG_BIT # Note that most tests are in test_direct.py. @@ -23,3 +24,27 @@ size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) size2 = size_gc_header + size2 assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] + +def test_card_marking_words_for_length(): + gc = MiniMarkGC(None, card_page_indices=128) + assert gc.card_page_shift == 7 + P = 128 * LONG_BIT + assert gc.card_marking_words_for_length(1) == 1 + assert gc.card_marking_words_for_length(P) == 1 + assert gc.card_marking_words_for_length(P+1) == 2 + assert gc.card_marking_words_for_length(P+P) == 2 + assert gc.card_marking_words_for_length(P+P+1) == 3 + assert gc.card_marking_words_for_length(P+P+P+P+P+P+P+P) == 8 + assert gc.card_marking_words_for_length(P+P+P+P+P+P+P+P+1) == 9 + +def test_card_marking_bytes_for_length(): + gc = MiniMarkGC(None, card_page_indices=128) + assert gc.card_page_shift == 7 + P = 128 * 8 + assert gc.card_marking_bytes_for_length(1) == 1 + assert gc.card_marking_bytes_for_length(P) == 1 + assert gc.card_marking_bytes_for_length(P+1) == 2 + assert gc.card_marking_bytes_for_length(P+P) == 2 + assert gc.card_marking_bytes_for_length(P+P+1) == 3 + assert gc.card_marking_bytes_for_length(P+P+P+P+P+P+P+P) == 8 + assert gc.card_marking_bytes_for_length(P+P+P+P+P+P+P+P+1) == 9 Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Mon Sep 20 09:48:06 2010 @@ -182,6 +182,7 @@ gcdata.gc.set_root_walker(root_walker) self.num_pushs = 0 self.write_barrier_calls = 0 + self.write_barrier_from_array_calls = 0 def frameworkgc_setup(): # run-time initialization code @@ -420,6 +421,8 @@ annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + self.write_barrier_ptr = None + self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, @@ -435,8 +438,26 @@ [annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None) - else: - self.write_barrier_ptr = None + func = getattr(GCClass, 'write_barrier_from_array', None) + if func is not None: + 
self.write_barrier_from_array_ptr = getfn(func.im_func, + [s_gc, + annmodel.SomeAddress(), + annmodel.SomeAddress(), + annmodel.SomeInteger()], + annmodel.s_None, + inline=True) + func = getattr(gcdata.gc, 'remember_young_pointer_from_array', + None) + if func is not None: + # func should not be a bound method, but a real function + assert isinstance(func, types.FunctionType) + self.write_barrier_from_array_failing_case_ptr = \ + getfn(func, + [annmodel.SomeAddress(), + annmodel.SomeInteger(), + annmodel.SomeAddress()], + annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], annmodel.SomeInteger()) @@ -523,6 +544,9 @@ if self.write_barrier_ptr: log.info("inserted %s write barrier calls" % ( self.write_barrier_calls, )) + if self.write_barrier_from_array_ptr: + log.info("inserted %s write_barrier_from_array calls" % ( + self.write_barrier_from_array_calls, )) # XXX because we call inputconst already in replace_malloc, we can't # modify the instance, we have to modify the 'rtyped instance' @@ -793,6 +817,12 @@ [self.write_barrier_failing_case_ptr], resultvar=op.result) + def gct_get_write_barrier_from_array_failing_case(self, hop): + op = hop.spaceop + hop.genop("same_as", + [self.write_barrier_from_array_failing_case_ptr], + resultvar=op.result) + def gct_zero_gc_pointers_inside(self, hop): if not self.malloc_zero_filled: v_ob = hop.spaceop.args[0] @@ -971,6 +1001,15 @@ c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO)) return hop.cast_result(c) + def _set_into_gc_array_part(self, op): + if op.opname == 'setarrayitem': + return op.args[1] + if op.opname == 'setinteriorfield': + for v in op.args[1:-1]: + if v.concretetype is not lltype.Void: + return v + return None + def transform_generic_set(self, hop): from pypy.objspace.flow.model import Constant opname = hop.spaceop.opname @@ -984,15 +1023,26 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - self.write_barrier_calls += 1 v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) - hop.genop("direct_call", [self.write_barrier_ptr, - self.c_const_gc, - v_newvalue, - v_structaddr]) + if (self.write_barrier_from_array_ptr is not None and + self._set_into_gc_array_part(hop.spaceop) is not None): + self.write_barrier_from_array_calls += 1 + v_index = self._set_into_gc_array_part(hop.spaceop) + assert v_index.concretetype == lltype.Signed + hop.genop("direct_call", [self.write_barrier_from_array_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr, + v_index]) + else: + self.write_barrier_calls += 1 + hop.genop("direct_call", [self.write_barrier_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr]) hop.rename('bare_' + opname) def transform_getfield_typeptr(self, hop): Modified: pypy/trunk/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/trunk/pypy/rpython/memory/gcwrapper.py Mon Sep 20 09:48:06 2010 @@ -15,6 +15,8 @@ self.llinterp = llinterp self.prepare_graphs(flowgraphs) self.gc.setup() + self.has_write_barrier_from_array = hasattr(self.gc, + 'write_barrier_from_array') def prepare_graphs(self, flowgraphs): lltype2vtable = self.llinterp.typer.lltype2vtable @@ -78,13 +80,30 @@ ARRAY = lltype.typeOf(array).TO addr = llmemory.cast_ptr_to_adr(array) addr += 
llmemory.itemoffsetof(ARRAY, index) - self.setinterior(array, addr, ARRAY.OF, newitem) + self.setinterior(array, addr, ARRAY.OF, newitem, (index,)) - def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue): + def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=()): if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and isinstance(INNERTYPE, lltype.Ptr) and INNERTYPE.TO._gckind == 'gc'): - self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue), - llmemory.cast_ptr_to_adr(toplevelcontainer)) + # + wb = True + if self.has_write_barrier_from_array: + for index in offsets: + if type(index) is not str: + assert (type(index) is int # <- fast path + or lltype.typeOf(index) == lltype.Signed) + self.gc.write_barrier_from_array( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer), + index) + wb = False + break + # + if wb: + self.gc.write_barrier( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) def collect(self, *gen): Modified: pypy/trunk/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gc.py Mon Sep 20 09:48:06 2010 @@ -770,3 +770,7 @@ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True + +class TestMiniMarkGCCardMarking(TestMiniMarkGC): + GC_PARAMS = {'card_page_indices': 4, + 'card_page_indices_min': 10} Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Mon Sep 20 09:48:06 2010 @@ -242,6 +242,26 @@ heap_size = self.heap_usage(statistics) assert heap_size < 16000 * WORD / 4 # xxx + def define_llinterp_dict(self): + class A(object): + pass + def malloc_a_lot(): + i = 0 + while i < 10: + i += 1 + a = (1, 2, i) + b = {a: A()} + j = 0 + while j < 20: + j += 1 + b[1, j, i] = A() + return 0 + return malloc_a_lot + + def test_llinterp_dict(self): + run = self.runner("llinterp_dict") + run([]) + def skipdefine_global_list(cls): gl = [] class Box: @@ -1454,6 +1474,8 @@ 'page_size': 16*WORD, 'arena_size': 64*WORD, 'small_request_threshold': 5*WORD, + 'card_page_indices': 4, + 'card_page_indices_min': 10, } root_stack_depth = 200 From fijal at codespeak.net Mon Sep 20 09:57:20 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 20 Sep 2010 09:57:20 +0200 (CEST) Subject: [pypy-svn] r77186 - pypy/branch/jitffi/pypy/jit/metainterp/test Message-ID: <20100920075720.876C9282BE3@codespeak.net> Author: fijal Date: Mon Sep 20 09:57:18 2010 New Revision: 77186 Added: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Log: A functional test about optimizing ffi access. 
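For reference, the rlib.libffi calling protocol that this test exercises looks roughly like the following when used outside any JIT driver (an illustrative sketch: the shared-library path is made up, and only the calls mirrored from the test below are real API):

    from pypy.rlib.libffi import CDLL, ffi_type_sint
    from pypy.rpython.lltypesystem import lltype

    cdll = CDLL('./xlib.so')   # hypothetical path to a compiled C library
    fn = cdll.getpointer('sum_xy',
                         [ffi_type_sint, ffi_type_sint],  # argument types
                         ffi_type_sint)                   # result type
    fn.push_arg(40)                # arguments are pushed one by one...
    fn.push_arg(2)
    res = fn.call(lltype.Signed)   # ...then call() consumes them
    assert res == 42
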
Added: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Mon Sep 20 09:57:18 2010 @@ -0,0 +1,40 @@ + +import py +from pypy.rlib.jit import JitDriver +from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.rlib.libffi import FuncPtr, CDLL, ffi_type_sint +from pypy.tool.udir import udir +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform +from pypy.rpython.lltypesystem import lltype, rffi + +class TestDirectCall(LLJitMixin): + def setup_class(cls): + # prepare C code as an example, so we can load it and call + # it via rlib.libffi + c_file = udir.ensure("test_jit_direct_call", dir=1).join("xlib.c") + c_file.write(py.code.Source(''' + int sum_xy(int x, int y) + { + return (x + y); + } + ''')) + eci = ExternalCompilationInfo(export_symbols=['sum_xy']) + cls.lib_name = str(platform.compile([c_file], eci, 'x', + standalone=False)) + + def test_one(self): + driver = JitDriver(reds = ['n', 'fn'], greens = []) + + def f(n): + cdll = CDLL(self.lib_name) + fn = cdll.getpointer('sum_xy', [ffi_type_sint, ffi_type_sint], + ffi_type_sint) + while n < 10: + driver.jit_merge_point(n=n, fn=fn) + driver.can_enter_jit(n=n, fn=fn) + fn.push_arg(n) + fn.push_arg(1) + n = fn.call(lltype.Signed) + + self.meta_interp(f, [0]) From arigo at codespeak.net Mon Sep 20 09:59:49 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 09:59:49 +0200 (CEST) Subject: [pypy-svn] r77187 - pypy/trunk/pypy/module/gc Message-ID: <20100920075949.7275A282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 09:59:48 2010 New Revision: 77187 Modified: pypy/trunk/pypy/module/gc/referents.py Log: Oups, syntax error. Modified: pypy/trunk/pypy/module/gc/referents.py ============================================================================== --- pypy/trunk/pypy/module/gc/referents.py (original) +++ pypy/trunk/pypy/module/gc/referents.py Mon Sep 20 09:59:48 2010 @@ -163,7 +163,7 @@ def _dump_rpy_heap(space, fd): try: - ok = rgc.dump_rpy_heap(fd): + ok = rgc.dump_rpy_heap(fd) except OSError, e: raise wrap_oserror(space, e) if not ok: From arigo at codespeak.net Mon Sep 20 10:09:55 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 10:09:55 +0200 (CEST) Subject: [pypy-svn] r77188 - pypy/extradoc/planning Message-ID: <20100920080955.F1F58282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 10:09:54 2010 New Revision: 77188 Modified: pypy/extradoc/planning/gc.txt pypy/extradoc/planning/gcplan.txt Log: Flag card marking as "partially done". Modified: pypy/extradoc/planning/gc.txt ============================================================================== --- pypy/extradoc/planning/gc.txt (original) +++ pypy/extradoc/planning/gc.txt Mon Sep 20 10:09:54 2010 @@ -1,6 +1,12 @@ Card marking GC for PyPy ======================== + +UPDATE: partially done in the minimark GC: large arrays are +allocated with a few extra bytes containing card marker bits. + + + With a generational GC one needs to keep track of references from the old generation to the young one using a write barrier. Currently this is implemented with a list of old objects that contain pointers to young objects. 
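For illustration, the two bookkeeping schemes contrasted here can be sketched in plain Python as follows (the attribute name loosely follows the minimark GC, but the classes themselves are invented for the example and are not part of the PyPy sources):

    class ListWriteBarrier(object):
        # current scheme: remember every old object into which a young
        # pointer was written, and re-trace it completely at the next
        # minor collection
        def __init__(self):
            self.old_objects_pointing_to_young = []

        def setitem(self, old_array, index, value, is_young):
            if is_young(value):
                self.old_objects_pointing_to_young.append(old_array)
            old_array[index] = value

    class CardMarkingWriteBarrier(object):
        # card marking: keep one dirty bit per fixed-size chunk ("card")
        # of a large array, so that only the dirty chunks need to be
        # re-traced at the next minor collection
        CARD = 128   # indices per card; an illustrative value

        def __init__(self, length):
            self.cards = [0] * ((length + self.CARD - 1) // self.CARD)

        def setitem(self, old_array, index, value, is_young):
            if is_young(value):
                self.cards[index // self.CARD] = 1
            old_array[index] = value
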
Modified: pypy/extradoc/planning/gcplan.txt ============================================================================== --- pypy/extradoc/planning/gcplan.txt (original) +++ pypy/extradoc/planning/gcplan.txt Mon Sep 20 10:09:54 2010 @@ -1,4 +1,9 @@ + +UPDATE: partially done in the minimark GC: large arrays are +allocated with a few extra bytes containing card marker bits. + + The problem: ============ From fijal at codespeak.net Mon Sep 20 10:15:27 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 20 Sep 2010 10:15:27 +0200 (CEST) Subject: [pypy-svn] r77189 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100920081527.DF742282BE3@codespeak.net> Author: fijal Date: Mon Sep 20 10:15:26 2010 New Revision: 77189 Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py Log: One style of multi-line imports Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Mon Sep 20 10:15:26 2010 @@ -1,16 +1,17 @@ import sys, os from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat -from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT,\ - LoopToken +from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, + LoopToken) from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.tool.uid import fixid -from pypy.jit.backend.x86.regalloc import RegAlloc, \ - X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs +from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, + X86XMMRegisterManager, get_ebp_ofs) -from pypy.jit.backend.x86.arch import FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, + IS_X86_32, IS_X86_64) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, From fijal at codespeak.net Mon Sep 20 10:38:41 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 20 Sep 2010 10:38:41 +0200 (CEST) Subject: [pypy-svn] r77190 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100920083841.BFE04282BE3@codespeak.net> Author: fijal Date: Mon Sep 20 10:38:40 2010 New Revision: 77190 Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py Log: Sprinkle a bit more valgrind_invalidated here and there Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Mon Sep 20 10:38:40 2010 @@ -475,6 +475,7 @@ # align, e.g. 
for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP mc.writeimm32(-WORD * aligned_words) + mc.valgrind_invalidated() mc.done() def _call_header(self): @@ -597,6 +598,7 @@ target = newlooptoken._x86_direct_bootstrap_code mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) mc.JMP(imm(target)) + mc.valgrind_invalidated() mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): From arigo at codespeak.net Mon Sep 20 10:51:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 10:51:50 +0200 (CEST) Subject: [pypy-svn] r77191 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100920085150.BF86E282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 10:51:49 2010 New Revision: 77191 Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Log: That's just a hack. Fixes jit.backend.x86.test_basic.test_identityhash. Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Mon Sep 20 10:51:49 2010 @@ -349,6 +349,9 @@ cache[cls1, cls2] = subcls return subcls +_hack_stay_alive = [None] * 64 +_hack_stay_alive_index = 0 + def add_storage(instance, mixin_cls, ctypes_storage): """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" @@ -357,6 +360,12 @@ subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls instance._storage = ctypes_storage + # + # hack hack hack. Avoids reusing too fast the same hashes :-/ + global _hack_stay_alive_index + _hack_stay_alive_index = (_hack_stay_alive_index + 1) & 63 + _hack_stay_alive[_hack_stay_alive_index] = instance + class _parentable_mixin(object): """Mixin added to _parentable containers when they become ctypes-based. From arigo at codespeak.net Mon Sep 20 11:13:42 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 11:13:42 +0200 (CEST) Subject: [pypy-svn] r77192 - pypy/trunk/pypy/rpython Message-ID: <20100920091342.E1654282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 11:13:41 2010 New Revision: 77192 Modified: pypy/trunk/pypy/rpython/llinterp.py Log: Add missing operations. 
Modified: pypy/trunk/pypy/rpython/llinterp.py ============================================================================== --- pypy/trunk/pypy/rpython/llinterp.py (original) +++ pypy/trunk/pypy/rpython/llinterp.py Mon Sep 20 11:13:41 2010 @@ -916,6 +916,24 @@ def op_gc_get_type_info_group(self): raise NotImplementedError("gc_get_type_info_group") + def op_gc_get_rpy_memory_usage(self): + raise NotImplementedError("gc_get_rpy_memory_usage") + + def op_gc_get_rpy_roots(self): + raise NotImplementedError("gc_get_rpy_roots") + + def op_gc_get_rpy_referents(self): + raise NotImplementedError("gc_get_rpy_referents") + + def op_gc_is_rpy_instance(self): + raise NotImplementedError("gc_is_rpy_instance") + + def op_gc_get_rpy_type_index(self): + raise NotImplementedError("gc_get_rpy_type_index") + + def op_gc_dump_rpy_heap(self): + raise NotImplementedError("gc_dump_rpy_heap") + def op_do_malloc_fixedsize_clear(self): raise NotImplementedError("do_malloc_fixedsize_clear") @@ -925,6 +943,9 @@ def op_get_write_barrier_failing_case(self): raise NotImplementedError("get_write_barrier_failing_case") + def op_get_write_barrier_from_array_failing_case(self): + raise NotImplementedError("get_write_barrier_from_array_failing_case") + def op_yield_current_frame_to_caller(self): raise NotImplementedError("yield_current_frame_to_caller") From arigo at codespeak.net Mon Sep 20 11:14:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 11:14:33 +0200 (CEST) Subject: [pypy-svn] r77193 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100920091433.EE5AD282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 11:14:32 2010 New Revision: 77193 Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Log: Revert my changes to ll2ctypes: we decided that the hash of an ll2ctypes object can change when the object is converted from lltype. Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Mon Sep 20 11:14:32 2010 @@ -349,23 +349,13 @@ cache[cls1, cls2] = subcls return subcls -_hack_stay_alive = [None] * 64 -_hack_stay_alive_index = 0 - def add_storage(instance, mixin_cls, ctypes_storage): """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" assert not isinstance(instance, _parentable_mixin) # not yet - ctypes_storage._preserved_hash = hash(instance) subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls instance._storage = ctypes_storage - # - # hack hack hack. Avoids reusing too fast the same hashes :-/ - global _hack_stay_alive_index - _hack_stay_alive_index = (_hack_stay_alive_index + 1) & 63 - _hack_stay_alive[_hack_stay_alive_index] = instance - class _parentable_mixin(object): """Mixin added to _parentable containers when they become ctypes-based. 
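Roughly, the behaviour this accepts can be illustrated as follows (an untested sketch; lltype.malloc and lltype2ctypes are the real helpers, the rest only demonstrates the effect):

    from pypy.rpython.lltypesystem import lltype
    from pypy.rpython.lltypesystem.ll2ctypes import lltype2ctypes

    S = lltype.GcStruct('S', ('x', lltype.Signed))
    p = lltype.malloc(S)
    h_before = hash(p._obj)    # hash of the plain lltype container
    lltype2ctypes(p)           # the conversion attaches a ctypes storage
    h_after = hash(p._obj)     # now derived from ctypes.addressof(storage)
    # h_before and h_after are allowed to differ -- that is the decision
    # recorded in this revision.
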
@@ -412,8 +402,6 @@ def __hash__(self): if self._storage is not None: - if hasattr(self._storage, '_preserved_hash'): - return self._storage._preserved_hash return ctypes.addressof(self._storage) else: return object.__hash__(self) From arigo at codespeak.net Mon Sep 20 11:25:56 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 11:25:56 +0200 (CEST) Subject: [pypy-svn] r77194 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100920092556.D28AA282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 11:25:55 2010 New Revision: 77194 Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py Log: A quick hack that is enough to handle changing hashes in my particular case (test_transformed_gc, TestMiniMarkGC). Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Mon Sep 20 11:25:55 2010 @@ -46,7 +46,7 @@ assert offset >= stop, "object overlaps cleared area" else: obj = ptr._obj - del Arena.object_arena_location[obj] + _dictdel(Arena.object_arena_location, obj) del self.objectptrs[offset] del self.objectsizes[offset] obj._free() @@ -258,6 +258,16 @@ raise RuntimeError(msg % (obj,)) return arena.getaddr(offset) +def _dictdel(d, key): + # hack + try: + del d[key] + except KeyError: + items = d.items() + d.clear() + d.update(items) + del d[key] + class RoundedUpForAllocation(llmemory.AddressOffset): """A size that is rounded up in order to preserve alignment of objects following it. For arenas containing heterogenous objects. From arigo at codespeak.net Mon Sep 20 12:17:39 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 12:17:39 +0200 (CEST) Subject: [pypy-svn] r77195 - pypy/trunk/pypy/translator Message-ID: <20100920101739.E796B282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 12:17:38 2010 New Revision: 77195 Modified: pypy/trunk/pypy/translator/exceptiontransform.py Log: Fix, as shown by test_transformed_gc.TestMarkCompactGC.test_llinterp_dict. Modified: pypy/trunk/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/trunk/pypy/translator/exceptiontransform.py (original) +++ pypy/trunk/pypy/translator/exceptiontransform.py Mon Sep 20 12:17:38 2010 @@ -277,7 +277,9 @@ block.exits[0].target is graph.returnblock and len(block.operations) and (block.exits[0].args[0].concretetype is lltype.Void or - block.exits[0].args[0] is block.operations[-1].result)): + block.exits[0].args[0] is block.operations[-1].result) and + block.operations[-1].opname not in ('malloc', # special cases + 'malloc_nonmovable')): last_operation -= 1 lastblock = block for i in range(last_operation, -1, -1): @@ -466,6 +468,9 @@ c_flags = spaceop.args[1] c_flags.value = c_flags.value.copy() spaceop.args[1].value['zero'] = True + # NB. when inserting more special-cases here, keep in mind that + # you also need to list the opnames in transform_block() + # (see "special cases") if insert_zeroing_op: if normalafterblock is None: From antocuni at codespeak.net Mon Sep 20 12:21:18 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 12:21:18 +0200 (CEST) Subject: [pypy-svn] r77196 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . 
optimizeopt test Message-ID: <20100920102118.A1B2A282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 12:21:16 2010 New Revision: 77196 Added: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py (contents, props changed) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Log: complete the refactoring, and create a class for each operation Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Mon Sep 20 12:21:16 2010 @@ -292,7 +292,7 @@ for i in range(len(specnodes)): value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) - op.setarglist(exitargs[:]) + op = op.copy_and_change(op.getopnum(), args=exitargs[:]) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Mon Sep 20 12:21:16 2010 @@ -2,101 +2,91 @@ from pypy.rlib.debug import make_sure_not_resized def ResOperation(opnum, args, result, descr=None): - return BaseResOperation(opnum, args, result, descr) + cls = opclasses[opnum] + op = cls(result) + op.initarglist(args) + if descr is not None: + assert isinstance(op, ResOpWithDescr) + op.setdescr(descr) + return op -class BaseResOperation(object): - """The central ResOperation class, representing one operation.""" - __slots__ = ['_fail_args', '_opnum', '_args', 'result', '_descr', - 'name', 'pc', '_exc_box', '__weakref__'] +class AbstractResOp(object): + """The central ResOperation class, representing one operation.""" # debug - ## name = "" - ## pc = 0 + name = "" + pc = 0 - def __init__(self, opnum, args, result, descr=None): - make_sure_not_resized(args) - assert isinstance(opnum, int) - self._opnum = opnum - self._args = list(args) - make_sure_not_resized(self._args) - assert not isinstance(result, list) + def __init__(self, result): self.result = result - self.setdescr(descr) - - self._fail_args = None - self.pc = 0 - self.name = '' - ## def __init__(self, result): - ## self.result = result + # methods implemented by each concrete class + # ------------------------------------------ + + def getopnum(self): + raise NotImplementedError - def copy_and_change(self, opnum, args=None, result=None, descr=None): - "shallow copy: the returned operation is meant to be used in place of self" - if args is None: - args = self.getarglist() - if result is None: - result = self.result - if descr is None: - descr = self.getdescr() - newop = ResOperation(opnum, args, result, descr) - #if isinstance(self, GuardOperation) - newop.setfailargs(self.getfailargs()) - return newop + # methods implemented by the arity mixins + # --------------------------------------- - def getopnum(self): - return self._opnum - #raise NotImplementedError + def 
initarglist(self, args): + "This is supposed to be called only just after the ResOp has been created" + raise NotImplementedError + + def getarglist(self): + raise NotImplementedError def getarg(self, i): - return self._args[i] - #raise NotImplementedError + raise NotImplementedError def setarg(self, i, box): - self._args[i] = box - #raise NotImplementedError + raise NotImplementedError def numargs(self): - return len(self._args) - #raise NotImplementedError + raise NotImplementedError - def setarglist(self, args): - # XXX: is it really needed? - self._args = args - #raise NotImplementedError - def getarglist(self): - return self._args - #raise NotImplementedError + # methods implemented by GuardResOp + # --------------------------------- def getfailargs(self): - return self._fail_args - #raise NotImplementedError + return None def setfailargs(self, fail_args): - self._fail_args = fail_args + raise NotImplementedError + + # methods implemented by ResOpWithDescr + # ------------------------------------- def getdescr(self): - return self._descr + return None - def setdescr(self, descr): - # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt - # instance provided by the backend holding details about the type - # of the operation. It must inherit from AbstractDescr. The - # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), - # cpu.calldescrof(), and cpu.typedescrof(). - from pypy.jit.metainterp.history import check_descr - check_descr(descr) - self._descr = descr + def setdescr(self): + raise NotImplementedError + + # common methods + # -------------- + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + "shallow copy: the returned operation is meant to be used in place of self" + if args is None: + args = self.getarglist() + if result is None: + result = self.result + if descr is None: + descr = self.getdescr() + newop = ResOperation(opnum, args, result, descr) + return newop def clone(self): - descr = self._descr + args = self.getarglist() + descr = self.getdescr() if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.getopnum(), self._args, self.result, descr) - op._fail_args = self._fail_args - op.name = self.name + op = ResOperation(self.getopnum(), args, self.result, descr) if not we_are_translated(): + op.name = self.name op.pc = self.pc return op @@ -113,12 +103,14 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self.getdescr() is None or we_are_translated(): + args = self.getarglist() + descr = self.getdescr() + if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self._args])) + ', '.join([str(a) for a in args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self._args]), self._descr) + ', '.join([str(a) for a in args]), descr) def getopname(self): try: @@ -167,30 +159,215 @@ return opboolresult[opnum] +# =================== +# Top of the hierachy +# =================== + +class PlainResOp(AbstractResOp): + pass + +class ResOpWithDescr(AbstractResOp): + + _descr = None + + def getdescr(self): + return self._descr + + def setdescr(self, descr): + # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt + # instance provided by the backend holding details about the type + # of the operation. It must inherit from AbstractDescr. The + # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), + # cpu.calldescrof(), and cpu.typedescrof(). 
+ from pypy.jit.metainterp.history import check_descr + check_descr(descr) + self._descr = descr + +class GuardResOp(ResOpWithDescr): + + _fail_args = None + + def getfailargs(self): + return self._fail_args + + def setfailargs(self, fail_args): + self._fail_args = fail_args + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) + newop.setfailargs(self.getfailargs()) + return newop + + def clone(self): + newop = AbstractResOp.clone(self) + newop.setfailargs(self.getfailargs()) + return newop + + +# ============ +# arity mixins +# ============ + +class NullaryOp(object): + _mixin_ = True + + def initarglist(self, args): + assert len(args) == 0 + + def getarglist(self): + return [] + + def numargs(self): + return 0 + + def getarg(self, i): + raise IndexError + + def setarg(self, i, box): + raise IndexError + + +class UnaryOp(object): + _mixin_ = True + _arg0 = None + + def initarglist(self, args): + assert len(args) == 1 + self._arg0, = args + + def getarglist(self): + return [self._arg0] + + def numargs(self): + return 1 + + def getarg(self, i): + if i == 0: + return self._arg0 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + else: + raise IndexError + + +class BinaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + + def initarglist(self, args): + assert len(args) == 2 + self._arg0, self._arg1 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 2 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + else: + raise IndexError + + def getarglist(self): + return [self._arg0, self._arg1] + + +class TernaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + _arg2 = None + + def initarglist(self, args): + assert len(args) == 3 + self._arg0, self._arg1, self._arg2 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 3 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + elif i == 2: + return self._arg2 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + elif i == 2: + self._arg2 = box + else: + raise IndexError + +class N_aryOp(object): + _mixin_ = True + _args = None + + def initarglist(self, args): + self._args = args + + def getarglist(self): + return self._args + + def numargs(self): + return len(self._args) + + def getarg(self, i): + return self._args[i] + + def setarg(self, i, box): + self._args[i] = box + # ____________________________________________________________ _oplist = [ '_FINAL_FIRST', - 'JUMP', - 'FINISH', + 'JUMP/*d', + 'FINISH/*d', '_FINAL_LAST', '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', - 'GUARD_TRUE', - 'GUARD_FALSE', - 'GUARD_VALUE', - 'GUARD_CLASS', - 'GUARD_NONNULL', - 'GUARD_ISNULL', - 'GUARD_NONNULL_CLASS', + 'GUARD_TRUE/1d', + 'GUARD_FALSE/1d', + 'GUARD_VALUE/2d', + 'GUARD_CLASS/2d', + 'GUARD_NONNULL/1d', + 'GUARD_ISNULL/1d', + 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION', - 'GUARD_EXCEPTION', - 'GUARD_NO_OVERFLOW', - 'GUARD_OVERFLOW', - 'GUARD_NOT_FORCED', + 'GUARD_NO_EXCEPTION/0d', + 'GUARD_EXCEPTION/1d', + 'GUARD_NO_OVERFLOW/0d', + 'GUARD_OVERFLOW/0d', + 'GUARD_NOT_FORCED/0d', '_GUARD_LAST', # ----- 
end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -279,18 +456,18 @@ 'UNICODESETITEM/3', 'NEWUNICODE/1', #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) + 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend '_CANRAISE_FIRST', # ----- start of can_raise operations ----- - 'CALL', - 'CALL_ASSEMBLER', - 'CALL_MAY_FORCE', - 'CALL_LOOPINVARIANT', + 'CALL/*d', + 'CALL_ASSEMBLER/*d', + 'CALL_MAY_FORCE/*d', + 'CALL_LOOPINVARIANT/*d', #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation - 'CALL_PURE', # removed before it's passed to the backend + 'CALL_PURE/*d', # removed before it's passed to the backend # CALL_PURE(result, func, arg_1,..,arg_n) '_CANRAISE_LAST', # ----- end of can_raise operations ----- @@ -307,6 +484,7 @@ class rop(object): pass +opclasses = [] # mapping numbers to the concrete ResOp class opname = {} # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -321,16 +499,50 @@ name, arity = name.split('/') withdescr = 'd' in arity boolresult = 'b' in arity - arity = int(arity.rstrip('db')) + arity = arity.rstrip('db') + if arity == '*': + arity = -1 + else: + arity = int(arity) else: arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): opname[i] = name + cls = create_class_for_op(name, i, arity, withdescr) + else: + cls = None + opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) opboolresult.append(boolresult) - assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + +def create_class_for_op(name, opnum, arity, withdescr): + arity2mixin = { + 0: NullaryOp, + 1: UnaryOp, + 2: BinaryOp, + 3: TernaryOp + } + + is_guard = name.startswith('GUARD') + if is_guard: + assert withdescr + baseclass = GuardResOp + elif withdescr: + baseclass = ResOpWithDescr + else: + baseclass = PlainResOp + mixin = arity2mixin.get(arity, N_aryOp) + + def getopnum(self): + return opnum + + cls_name = '%s_OP' % name + bases = (mixin, baseclass) + dic = {'getopnum': getopnum} + return type(cls_name, bases, dic) setup(__name__ == '__main__') # print out the table when run directly del _oplist Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/oparser.py Mon Sep 20 12:21:16 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ LoopToken -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory @@ -16,10 +16,22 @@ class ParseError(Exception): pass - class Boxes(object): pass +class ESCAPE_OP(N_aryOp, ResOpWithDescr): + + OPNUM = -123 + + def __init__(self, 
opnum, args, result, descr=None): + assert opnum == self.OPNUM + self.result = result + self.initarglist(args) + self.setdescr(descr) + + def getopnum(self): + return self.OPNUM + class ExtendedTreeLoop(TreeLoop): def getboxes(self): @@ -171,7 +183,7 @@ opnum = getattr(rop, opname.upper()) except AttributeError: if opname == 'escape': - opnum = -123 + opnum = ESCAPE_OP.OPNUM else: raise ParseError("unknown op: %s" % opname) endnum = line.rfind(')') @@ -228,6 +240,12 @@ descr = self.looptoken return opnum, args, descr, fail_args + def create_op(self, opnum, args, result, descr): + if opnum == ESCAPE_OP.OPNUM: + return ESCAPE_OP(opnum, args, result, descr) + else: + return ResOperation(opnum, args, result, descr) + def parse_result_op(self, line): res, op = line.split("=", 1) res = res.strip() @@ -237,14 +255,16 @@ raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) self.vars[res] = rvar - res = ResOperation(opnum, args, rvar, descr) - res.setfailargs(fail_args) + res = self.create_op(opnum, args, rvar, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_op_no_result(self, line): opnum, args, descr, fail_args = self.parse_op(line) - res = ResOperation(opnum, args, None, descr) - res.setfailargs(fail_args) + res = self.create_op(opnum, args, None, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_next_op(self, line): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Mon Sep 20 12:21:16 2010 @@ -42,7 +42,7 @@ opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), None) fdescr = ResumeGuardDescr(None, None) - op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr) + op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr) # setup rd data fi0 = resume.FrameInfo(None, "code0", 11) fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33) Added: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py ============================================================================== --- (empty file) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py Mon Sep 20 12:21:16 2010 @@ -0,0 +1,54 @@ +import py +from pypy.jit.metainterp import resoperation as rop +from pypy.jit.metainterp.history import AbstractDescr + +def test_arity_mixins(): + cases = [ + (0, rop.NullaryOp), + (1, rop.UnaryOp), + (2, rop.BinaryOp), + (3, rop.TernaryOp), + (9, rop.N_aryOp) + ] + + def test_case(n, cls): + obj = cls() + obj.initarglist(range(n)) + assert obj.getarglist() == range(n) + for i in range(n): + obj.setarg(i, i*2) + assert obj.numargs() == n + for i in range(n): + assert obj.getarg(i) == i*2 + py.test.raises(IndexError, obj.getarg, n+1) + py.test.raises(IndexError, obj.setarg, n+1, 0) + + for n, cls in cases: + test_case(n, cls) + +def test_concrete_classes(): + cls = rop.opclasses[rop.rop.INT_ADD] + assert issubclass(cls, rop.PlainResOp) + assert issubclass(cls, rop.BinaryOp) + assert cls.getopnum.im_func(None) == rop.rop.INT_ADD + + cls = rop.opclasses[rop.rop.CALL] + assert issubclass(cls, rop.ResOpWithDescr) + assert issubclass(cls, rop.N_aryOp) + assert cls.getopnum.im_func(None) == rop.rop.CALL + + cls = 
rop.opclasses[rop.rop.GUARD_TRUE] + assert issubclass(cls, rop.GuardResOp) + assert issubclass(cls, rop.UnaryOp) + assert cls.getopnum.im_func(None) == rop.rop.GUARD_TRUE + +def test_instantiate(): + op = rop.ResOperation(rop.rop.INT_ADD, ['a', 'b'], 'c') + assert op.getarglist() == ['a', 'b'] + assert op.result == 'c' + + mydescr = AbstractDescr() + op = rop.ResOperation(rop.rop.CALL, ['a', 'b'], 'c', descr=mydescr) + assert op.getarglist() == ['a', 'b'] + assert op.result == 'c' + assert op.getdescr() is mydescr From afa at codespeak.net Mon Sep 20 13:09:35 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 20 Sep 2010 13:09:35 +0200 (CEST) Subject: [pypy-svn] r77197 - in pypy/trunk/pypy: rlib rpython/lltypesystem rpython/lltypesystem/test Message-ID: <20100920110935.EA793282BE3@codespeak.net> Author: afa Date: Mon Sep 20 13:09:34 2010 New Revision: 77197 Modified: pypy/trunk/pypy/rlib/rwin32.py pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Log: Change the _storage attribute: instead of a ctypes object (struct or array), it now stores a ctypes.pointer to the allocated structure. This makes it possible to directly cast a number to a lltype.Ptr, and perform comparisons. Modified: pypy/trunk/pypy/rlib/rwin32.py ============================================================================== --- pypy/trunk/pypy/rlib/rwin32.py (original) +++ pypy/trunk/pypy/rlib/rwin32.py Mon Sep 20 13:09:34 2010 @@ -82,6 +82,8 @@ if WIN32: HANDLE = rffi.COpaquePtr(typedef='HANDLE') + assert rffi.cast(HANDLE, -1) == rffi.cast(HANDLE, -1) + LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE NULL_HANDLE = rffi.cast(HANDLE, 0) Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Mon Sep 20 13:09:34 2010 @@ -26,9 +26,6 @@ from pypy.translator.platform import platform from array import array -def uaddressof(obj): - return fixid(ctypes.addressof(obj)) - _ctypes_cache = {} _eci_cache = {} @@ -251,7 +248,7 @@ else: n = None cstruct = cls._malloc(n) - add_storage(container, _struct_mixin, cstruct) + add_storage(container, _struct_mixin, ctypes.pointer(cstruct)) for field_name in STRUCT._names: FIELDTYPE = getattr(STRUCT, field_name) field_value = getattr(container, field_name) @@ -264,8 +261,6 @@ if isinstance(FIELDTYPE, lltype.Struct): csubstruct = getattr(cstruct, field_name) convert_struct(field_value, csubstruct) - subcontainer = getattr(container, field_name) - substorage = subcontainer._storage elif field_name == STRUCT._arrayfld: # inlined var-sized part csubarray = getattr(cstruct, field_name) convert_array(field_value, csubarray) @@ -292,7 +287,7 @@ # regular case: allocate a new ctypes array of the proper type cls = get_ctypes_type(ARRAY) carray = cls._malloc(container.getlength()) - add_storage(container, _array_mixin, carray) + add_storage(container, _array_mixin, ctypes.pointer(carray)) if not isinstance(ARRAY.OF, lltype.ContainerType): # fish that we have enough space ctypes_array = ctypes.cast(carray.items, @@ -321,13 +316,15 @@ if isinstance(FIELDTYPE, lltype.ContainerType): if isinstance(FIELDTYPE, lltype.Struct): struct_container = getattr(container, field_name) - struct_storage = getattr(ctypes_storage, field_name) + struct_storage = ctypes.pointer( + getattr(ctypes_storage.contents, field_name)) 
struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): assert FIELDTYPE._hints.get('nolength', False) == False arraycontainer = _array_of_known_length(FIELDTYPE) - arraycontainer._storage = getattr(ctypes_storage, field_name) + arraycontainer._storage = ctypes.pointer( + getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) object.__setattr__(container, field_name, arraycontainer) else: @@ -352,6 +349,8 @@ def add_storage(instance, mixin_cls, ctypes_storage): """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" + # _storage is a ctypes pointer to a structure + # except for Opaque objects which use a c_void_p. assert not isinstance(instance, _parentable_mixin) # not yet subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls @@ -365,17 +364,23 @@ __slots__ = () def _ctypes_storage_was_allocated(self): - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value if addr in ALLOCATED: raise Exception("internal ll2ctypes error - " "double conversion from lltype to ctypes?") # XXX don't store here immortal structures ALLOCATED[addr] = self + def _addressof_storage(self): + "Returns the storage address as an int" + if self._storage is None or self._storage is True: + raise ValueError("Not a ctypes allocated structure") + return ctypes.cast(self._storage, ctypes.c_void_p).value + def _free(self): self._check() # no double-frees # allow the ctypes object to go away now - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value try: del ALLOCATED[addr] except KeyError: @@ -393,16 +398,16 @@ raise RuntimeError("pointer comparison with a freed structure") if other._storage is True: return False # the other container is not ctypes-based - addressof_other = ctypes.addressof(other._storage) - # both containers are ctypes-based, compare by address - return (ctypes.addressof(self._storage) == addressof_other) + addressof_other = other._addressof_storage() + # both containers are ctypes-based, compare the addresses + return self._addressof_storage() == addressof_other def __ne__(self, other): return not (self == other) def __hash__(self): if self._storage is not None: - return ctypes.addressof(self._storage) + return self._addressof_storage() else: return object.__hash__(self) @@ -411,7 +416,7 @@ return '' % (self._TYPE,) else: return '' % (self._TYPE, - uaddressof(self._storage),) + fixid(self._addressof_storage())) def __str__(self): return repr(self) @@ -422,7 +427,7 @@ def __getattr__(self, field_name): T = getattr(self._TYPE, field_name) - cobj = getattr(self._storage, field_name) + cobj = getattr(self._storage.contents, field_name) return ctypes2lltype(T, cobj) def __setattr__(self, field_name, value): @@ -430,17 +435,17 @@ object.__setattr__(self, field_name, value) # '_xxx' attributes else: cobj = lltype2ctypes(value) - setattr(self._storage, field_name, cobj) + setattr(self._storage.contents, field_name, cobj) class _array_mixin(_parentable_mixin): """Mixin added to _array containers when they become ctypes-based.""" __slots__ = () def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index) + return self._storage.contents._getitem(index) def setitem(self, index, value): - self._storage._setitem(index, value) + 
self._storage.contents._setitem(index, value) class _array_of_unknown_length(_parentable_mixin, lltype._parentable): _kind = "array" @@ -451,10 +456,10 @@ return 0, sys.maxint def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index, boundscheck=False) + return self._storage.contents._getitem(index, boundscheck=False) def setitem(self, index, value): - self._storage._setitem(index, value, boundscheck=False) + self._storage.contents._setitem(index, value, boundscheck=False) def getitems(self): if self._TYPE.OF != lltype.Char: @@ -476,7 +481,7 @@ __slots__ = () def getlength(self): - return self._storage.length + return self._storage.contents.length def getbounds(self): return 0, self.getlength() @@ -653,17 +658,18 @@ container._ctypes_storage_was_allocated() if isinstance(T.TO, lltype.OpaqueType): - return container._storage + return container._storage.value storage = container._storage - p = ctypes.pointer(storage) + p = storage if index: p = ctypes.cast(p, ctypes.c_void_p) p = ctypes.c_void_p(p.value + index) c_tp = get_ctypes_type(T.TO) - storage._normalized_ctype = c_tp - if normalize and hasattr(storage, '_normalized_ctype'): - p = ctypes.cast(p, ctypes.POINTER(storage._normalized_ctype)) + storage.contents._normalized_ctype = c_tp + if normalize and hasattr(storage.contents, '_normalized_ctype'): + normalized_ctype = storage.contents._normalized_ctype + p = ctypes.cast(p, ctypes.POINTER(normalized_ctype)) if lltype.typeOf(llobj) == llmemory.GCREF: p = ctypes.cast(p, ctypes.c_void_p) return p @@ -707,13 +713,13 @@ cobjheader = ctypes.cast(cobj, get_ctypes_type(lltype.Ptr(OBJECT))) struct_use_ctypes_storage(containerheader, - cobjheader.contents) + cobjheader) REAL_TYPE = get_rtyper().get_type_for_typeptr( containerheader.typeptr) REAL_T = lltype.Ptr(REAL_TYPE) cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T)) container = lltype._struct(REAL_TYPE) - struct_use_ctypes_storage(container, cobj.contents) + struct_use_ctypes_storage(container, cobj) if REAL_TYPE != T.TO: p = container._as_ptr() container = lltype.cast_pointer(T, p)._as_obj() @@ -728,10 +734,10 @@ elif isinstance(T.TO, lltype.Array): if T.TO._hints.get('nolength', False): container = _array_of_unknown_length(T.TO) - container._storage = cobj.contents + container._storage = cobj else: container = _array_of_known_length(T.TO) - container._storage = cobj.contents + container._storage = cobj elif isinstance(T.TO, lltype.FuncType): cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: @@ -745,7 +751,8 @@ container = _llgcopaque(cobj) else: container = lltype._opaque(T.TO) - container._storage = ctypes.cast(cobj, ctypes.c_void_p) + cbuf = ctypes.cast(cobj, ctypes.c_void_p) + add_storage(container, _parentable_mixin, cbuf) else: raise NotImplementedError(T) llobj = lltype._ptr(T, container, solid=True) Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Mon Sep 20 13:09:34 2010 @@ -353,6 +353,8 @@ assert tmppath.check(file=1) assert not ALLOCATED # detects memory leaks in the test + assert rffi.cast(FILEP, -1) == rffi.cast(FILEP, -1) + def test_simple_cast(self): assert rffi.cast(rffi.SIGNEDCHAR, 0x123456) == 0x56 assert rffi.cast(rffi.SIGNEDCHAR, 0x123481) == -127 From antocuni at codespeak.net Mon Sep 20 13:15:07 2010 From: antocuni at 
codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 13:15:07 +0200 (CEST) Subject: [pypy-svn] r77198 - pypy/branch/resoperation-refactoring/pypy/jit/metainterp Message-ID: <20100920111507.8E1D3282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 13:15:06 2010 New Revision: 77198 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Log: typo Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Mon Sep 20 13:15:06 2010 @@ -62,7 +62,7 @@ def getdescr(self): return None - def setdescr(self): + def setdescr(self, descr): raise NotImplementedError # common methods From antocuni at codespeak.net Mon Sep 20 13:15:54 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 13:15:54 +0200 (CEST) Subject: [pypy-svn] r77199 - in pypy/branch/resoperation-refactoring/pypy/jit/backend: llsupport/test test Message-ID: <20100920111554.3724E282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 13:15:52 2010 New Revision: 77199 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py Log: fix tests Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llsupport/test/test_gc.py Mon Sep 20 13:15:52 2010 @@ -269,7 +269,7 @@ def test_get_rid_of_debug_merge_point(self): operations = [ - ResOperation(rop.DEBUG_MERGE_POINT, [], None), + ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None), ] gc_ll_descr = self.gc_ll_descr gc_ll_descr.rewrite_assembler(None, operations) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/test/runner_test.py Mon Sep 20 13:15:52 2010 @@ -78,7 +78,8 @@ operations[0].setfailargs([]) if not descr: descr = BasicFailDescr(1) - operations[0].setdescr(descr) + if descr is not None: + operations[0].setdescr(descr) inputargs = [] for box in valueboxes: if isinstance(box, Box) and box not in inputargs: From afa at codespeak.net Mon Sep 20 13:19:54 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 20 Sep 2010 13:19:54 +0200 (CEST) Subject: [pypy-svn] r77200 - in pypy/branch/fast-forward: . 
pypy/config pypy/doc/discussion pypy/interpreter pypy/jit/backend/x86 pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/jit/tl pypy/module/__builtin__ pypy/module/__builtin__/test pypy/module/_ssl/test pypy/module/array pypy/module/array/test pypy/module/gc pypy/module/gc/test pypy/module/pypyjit/test pypy/module/select pypy/module/select/test pypy/module/sys pypy/rlib pypy/rlib/test pypy/rpython pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/gctransform pypy/rpython/memory/test pypy/rpython/numpy pypy/translator pypy/translator/c pypy/translator/c/src pypy/translator/c/test Message-ID: <20100920111954.CFC7C282BE3@codespeak.net> Author: afa Date: Mon Sep 20 13:19:51 2010 New Revision: 77200 Added: pypy/branch/fast-forward/pypy/module/gc/app_referents.py - copied unchanged from r77197, pypy/trunk/pypy/module/gc/app_referents.py pypy/branch/fast-forward/pypy/module/gc/referents.py - copied unchanged from r77197, pypy/trunk/pypy/module/gc/referents.py pypy/branch/fast-forward/pypy/module/gc/test/test_app_referents.py - copied unchanged from r77197, pypy/trunk/pypy/module/gc/test/test_app_referents.py pypy/branch/fast-forward/pypy/module/gc/test/test_referents.py - copied unchanged from r77197, pypy/trunk/pypy/module/gc/test/test_referents.py pypy/branch/fast-forward/pypy/rpython/memory/gc/inspect.py - copied unchanged from r77197, pypy/trunk/pypy/rpython/memory/gc/inspect.py pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py - copied unchanged from r77197, pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py - copied unchanged from r77197, pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py - copied unchanged from r77197, pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py - copied unchanged from r77197, pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py Removed: pypy/branch/fast-forward/pypy/rpython/numpy/ Modified: pypy/branch/fast-forward/ (props changed) pypy/branch/fast-forward/pypy/config/translationoption.py pypy/branch/fast-forward/pypy/doc/discussion/finalizer-order.txt pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py pypy/branch/fast-forward/pypy/module/__builtin__/functional.py pypy/branch/fast-forward/pypy/module/__builtin__/test/test_minmax.py pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py pypy/branch/fast-forward/pypy/module/array/interp_array.py pypy/branch/fast-forward/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/fast-forward/pypy/module/gc/__init__.py pypy/branch/fast-forward/pypy/module/gc/interp_gc.py pypy/branch/fast-forward/pypy/module/gc/test/test_gc.py 
pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/fast-forward/pypy/module/select/interp_select.py pypy/branch/fast-forward/pypy/module/select/test/test_select.py pypy/branch/fast-forward/pypy/module/sys/version.py pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py pypy/branch/fast-forward/pypy/rlib/rarithmetic.py pypy/branch/fast-forward/pypy/rlib/rgc.py pypy/branch/fast-forward/pypy/rlib/rstring.py pypy/branch/fast-forward/pypy/rlib/rwin32.py pypy/branch/fast-forward/pypy/rlib/test/test_rgc.py pypy/branch/fast-forward/pypy/rpython/llinterp.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/llheap.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py pypy/branch/fast-forward/pypy/rpython/memory/lltypelayout.py pypy/branch/fast-forward/pypy/rpython/memory/support.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_support.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/fast-forward/pypy/rpython/rptr.py pypy/branch/fast-forward/pypy/translator/c/funcgen.py pypy/branch/fast-forward/pypy/translator/c/src/mem.h pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py pypy/branch/fast-forward/pypy/translator/exceptiontransform.py Log: merge from trunk Modified: pypy/branch/fast-forward/pypy/config/translationoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/translationoption.py (original) +++ pypy/branch/fast-forward/pypy/config/translationoption.py Mon Sep 20 13:19:51 2010 @@ -52,7 +52,7 @@ # gc ChoiceOption("gc", "Garbage Collection Strategy", ["boehm", "ref", "marksweep", "semispace", "statistics", - "generation", "hybrid", "markcompact", "none"], + "generation", "hybrid", "markcompact", "minimark", "none"], "ref", requires={ "ref": [("translation.rweakref", False), # XXX ("translation.gctransformer", "ref")], @@ -65,6 +65,7 @@ "hybrid": [("translation.gctransformer", "framework")], "boehm": [("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], + "minimark": [("translation.gctransformer", "framework")], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", Modified: pypy/branch/fast-forward/pypy/doc/discussion/finalizer-order.txt ============================================================================== --- pypy/branch/fast-forward/pypy/doc/discussion/finalizer-order.txt (original) +++ pypy/branch/fast-forward/pypy/doc/discussion/finalizer-order.txt Mon Sep 20 13:19:51 2010 @@ -133,8 +133,8 @@ that doesn't change the state of an object, we don't follow its children recursively. 
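[Aside, not part of the finalizer-order.txt patch.] Both tables below encode the four finalization-ordering states losslessly in two booleans. A small sketch, assuming the MiniMark flag assignment shown in the second table:

    def minimark_state(visited, ordering):
        # decode state 0..3 from GCFLAG_VISITED / GCFLAG_FINALIZATION_ORDERING,
        # per the table: 0=(no, no), 1=(no, yes), 2=(yes, yes), 3=(yes, no)
        if not visited:
            return 1 if ordering else 0
        return 2 if ordering else 3

    assert [minimark_state(v, o)
            for (v, o) in [(False, False), (False, True),
                           (True, True), (True, False)]] == [0, 1, 2, 3]
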
-In practice we can encode the 4 states with a single extra bit in the -header: +In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode +the 4 states with a single extra bit in the header: ===== ============= ======== ==================== state is_forwarded? bit set? bit set in the copy? @@ -150,3 +150,17 @@ bit in the copy at the end, to clean up before the next collection (which means recursively bumping the state from 2 to 3 in the final loop). + +In the MiniMark GC, the objects don't move (apart from when they are +copied out of the nursery), but we use the flag GCFLAG_VISITED to mark +objects that survive, so we can also have a single extra bit for +finalizers: + + ===== ============== ============================ + state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING + ===== ============== ============================ + 0 no no + 1 no yes + 2 yes yes + 3 yes no + ===== ============== ============================ Modified: pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py Mon Sep 20 13:19:51 2010 @@ -71,7 +71,8 @@ space.wrap("__class__ assignment: only for heap types")) def user_setup(self, space, w_subtype): - assert False, "only for interp-level user subclasses from typedef.py" + raise NotImplementedError("only for interp-level user subclasses " + "from typedef.py") def getname(self, space, default): try: Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py Mon Sep 20 13:19:51 2010 @@ -1,16 +1,17 @@ import sys, os from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat -from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT,\ - LoopToken +from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, + LoopToken) from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.tool.uid import fixid -from pypy.jit.backend.x86.regalloc import RegAlloc, \ - X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs +from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, + X86XMMRegisterManager, get_ebp_ofs) -from pypy.jit.backend.x86.arch import FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, + IS_X86_32, IS_X86_64) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, @@ -474,6 +475,7 @@ # align, e.g. 
for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP mc.writeimm32(-WORD * aligned_words) + mc.valgrind_invalidated() mc.done() def _call_header(self): @@ -596,6 +598,7 @@ target = newlooptoken._x86_direct_bootstrap_code mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) mc.JMP(imm(target)) + mc.valgrind_invalidated() mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py Mon Sep 20 13:19:51 2010 @@ -1,8 +1,8 @@ -from optimizer import Optimizer -from rewrite import OptRewrite -from intbounds import OptIntBounds -from virtualize import OptVirtualize -from heap import OptHeap +from pypy.jit.metainterp.optimizeopt.optimizer import Optimizer +from pypy.jit.metainterp.optimizeopt.rewrite import OptRewrite +from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds +from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize +from pypy.jit.metainterp.optimizeopt.heap import OptHeap def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py Mon Sep 20 13:19:51 2010 @@ -2,7 +2,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated -from optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization class CachedArrayItems(object): def __init__(self): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py Mon Sep 20 13:19:51 2010 @@ -1,6 +1,7 @@ -from optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.optimizeutil import _findall -from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ + IntLowerBound from pypy.jit.metainterp.history import Const, ConstInt from pypy.jit.metainterp.resoperation import rop, ResOperation @@ -25,7 +26,7 @@ b = v.intbound if b.has_lower and b.has_upper and b.lower == b.upper: v.make_constant(ConstInt(b.lower)) - + try: op = self.optimizer.producer[box] except KeyError: @@ -35,7 +36,7 @@ if opnum == value: func(self, op) break - + def optimize_GUARD_TRUE(self, op): self.emit_operation(op) self.propagate_bounds_backward(op.args[0]) @@ -47,7 +48,7 @@ v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) self.emit_operation(op) - + r = self.getvalue(op.result) if v2.is_constant(): val = v2.box.getint() @@ -57,14 +58,14 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0,val)) - + def optimize_INT_SUB(self, op): v1 = self.getvalue(op.args[0]) v2 = 
self.getvalue(op.args[1]) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) - + def optimize_INT_ADD(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -93,7 +94,7 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -123,7 +124,7 @@ self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_LT(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -171,9 +172,9 @@ self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) - else: + else: self.emit_operation(op) - + def optimize_INT_NE(self, op): v1 = self.getvalue(op.args[0]) v2 = self.getvalue(op.args[1]) @@ -181,17 +182,24 @@ self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - else: + else: self.emit_operation(op) - + + def optimize_ARRAYLEN_GC(self, op): + self.emit_operation(op) + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(0)) + + optimize_STRLEN = optimize_ARRAYLEN_GC + def make_int_lt(self, args): v1 = self.getvalue(args[0]) v2 = self.getvalue(args[1]) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(args[0]) if v2.intbound.make_gt(v1.intbound): self.propagate_bounds_backward(args[1]) - + def make_int_le(self, args): v1 = self.getvalue(args[0]) @@ -267,7 +275,7 @@ r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.args[0]) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): self.propagate_bounds_backward(op.args[1]) @@ -278,10 +286,10 @@ r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.args[0]) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.args[1]) def propagate_bounds_INT_MUL(self, op): v1 = self.getvalue(op.args[0]) @@ -289,7 +297,7 @@ r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.args[0]) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): self.propagate_bounds_backward(op.args[1]) @@ -300,4 +308,3 @@ optimize_ops = _findall(OptIntBounds, 'optimize_') propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_') - Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py Mon Sep 20 13:19:51 2010 @@ -11,7 +11,7 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int -from intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -187,7 
+187,7 @@ class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True): + def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -199,10 +199,8 @@ self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] - - if len(optimizations) == 0: - self.first_optimization = self - else: + + if optimizations: self.first_optimization = optimizations[0] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] @@ -210,6 +208,8 @@ for o in optimizations: o.optimizer = self o.setup(virtuals) + else: + self.first_optimization = self def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS) @@ -351,9 +351,9 @@ if op.opnum == rop.GUARD_VALUE: if self.getvalue(op.args[0]) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. - # This is done after the operation is emitted, to let - # store_final_boxes_in_guard set the guard_opnum field - # of the descr to the original rop.GUARD_VALUE. + # This is done after the operation is emitted to let + # store_final_boxes_in_guard set the guard_opnum field of the + # descr to the original rop.GUARD_VALUE. constvalue = op.args[1].getint() if constvalue == 0: opnum = rop.GUARD_FALSE Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py Mon Sep 20 13:19:51 2010 @@ -1,11 +1,11 @@ -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.resoperation import rop, ResOperation class OptRewrite(Optimization): - """Rewrite operations into equvivialent, cheeper operations. + """Rewrite operations into equivalent, cheaper operations. This includes already executed operations and constants. 
""" Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py Mon Sep 20 13:19:51 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * class AbstractVirtualValue(OptValue): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py Mon Sep 20 13:19:51 2010 @@ -33,7 +33,7 @@ self.profiler = EmptyProfiler() self.options = Fake() self.globaldata = Fake() - + def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -75,7 +75,7 @@ assert lst3 == [LLtypeMixin.valuedescr] lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 @@ -489,7 +489,7 @@ jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected) - + def test_ooisnull_oononnull_1(self): ops = """ [p0] @@ -842,7 +842,7 @@ jump(f, f1) """ self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected, checkspecnodes=False) + expected, checkspecnodes=False) def test_virtual_2(self): ops = """ @@ -2171,7 +2171,7 @@ jump(i1, i0) """ self.optimize_loop(ops, 'Not, Not', expected) - + def test_fold_partially_constant_ops(self): ops = """ [i0] @@ -2183,7 +2183,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(i0, 0) @@ -2194,7 +2194,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(0, i0) @@ -2205,7 +2205,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + # ---------- def make_fail_descr(self): @@ -3119,7 +3119,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noguard(self): ops = """ [i0] @@ -3134,7 +3134,7 @@ jump(i2) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noopt(self): ops = """ [i0] @@ -3153,7 +3153,7 @@ jump(4) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_rev(self): ops = """ [i0] @@ -3170,7 +3170,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_tripple(self): ops = """ [i0] @@ -3189,7 +3189,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add(self): ops = """ [i0] @@ -3204,11 +3204,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_before(self): ops = """ [i0] @@ -3227,7 +3227,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf(self): ops = """ [i0] @@ -3243,11 +3243,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf_before(self): ops = """ [i0] @@ -3268,7 +3268,7 @@ 
jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub(self): ops = """ [i0] @@ -3283,11 +3283,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_sub(i0, 10) + i2 = int_sub(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub_before(self): ops = """ [i0] @@ -3306,7 +3306,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_ltle(self): ops = """ [i0] @@ -3357,7 +3357,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gtge(self): ops = """ [i0] @@ -3374,7 +3374,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gegt(self): ops = """ [i0] @@ -3414,6 +3414,42 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_bound_arraylen(self): + ops = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + i2 = int_gt(i1, -1) + guard_true(i2) [] + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + # The dead arraylen_gc will be eliminated by the backend. + expected = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_strlen(self): + ops = """ + [p0] + i0 = strlen(p0) + i1 = int_ge(i0, 0) + guard_true(i1) [] + jump(p0) + """ + # The dead strlen will be eliminated be the backend. + expected = """ + [p0] + i0 = strlen(p0) + jump(p0) + """ + self.optimize_loop(ops, 'Not', expected) + def test_addsub_const(self): ops = """ [i0] @@ -3558,7 +3594,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ expected = """ @@ -3571,7 +3607,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ self.optimize_loop(ops, 'Not', expected) @@ -3818,7 +3854,7 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): @@ -3835,7 +3871,7 @@ ## jump(1) ## """ ## self.optimize_loop(ops, 'Not', expected) - + ## def test_instanceof_guard_class(self): ## ops = """ ## [i0, p0] Modified: pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/fast-forward/pypy/jit/tl/pypyjit_demo.py Mon Sep 20 13:19:51 2010 @@ -39,16 +39,24 @@ try: from array import array + + def coords(w,h): + y = 0 + while y < h: + x = 0 + while x < w: + yield x,y + x += 1 + y += 1 + def f(img): - i=0 sa=0 - while i < img.__len__(): - sa+=img[i] - i+=1 + for x, y in coords(4,4): + sa += x * y return sa - img=array('h',(1,2,3,4)) - print f(img) + #img=array('h',(1,2,3,4)) + print f(3) except Exception, e: print "Exception: ", type(e) print e Modified: pypy/branch/fast-forward/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/functional.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/functional.py Mon Sep 20 13:19:51 2010 @@ -13,6 +13,7 @@ from pypy.rlib.objectmodel import specialize from pypy.module.__builtin__.app_functional import range as app_range from inspect import getsource, getfile +from pypy.rlib.jit import unroll_safe """ Implementation of the common integer case of range. 
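[Illustration only, not part of the functional.py patch.] The hunk below splits min()/max() into two paths: a call with exactly two positional arguments and no keywords loops over a fixed two-element list, which the new unroll_safe decoration lets the JIT unroll away, while every other call still goes through the general loop (min_max_loop). A rough pure-Python sketch of that split (min_max_sketch is a hypothetical name, not a PyPy API):

    def min_max_sketch(args, implementation_of="max", **kwargs):
        better = ((lambda a, b: a > b) if implementation_of == "max"
                  else (lambda a, b: a < b))
        if len(args) == 2 and not kwargs:
            # the fast, fixed-shape case: written here without the loop
            a, b = args
            return b if better(b, a) else a
        # general case: same behaviour as before the patch
        if len(args) == 1:
            args = list(args[0])
        best = None
        for item in args:
            if best is None or better(item, best):
                best = item
        if best is None:
            raise ValueError("arg is an empty sequence")
        return best

    assert min_max_sketch((3, 5)) == 5
    assert min_max_sketch(([4, 1, 2],), implementation_of="min") == 1
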
Instead of handling @@ -96,12 +97,32 @@ return W_RangeListObject(start, step, howmany) + at unroll_safe @specialize.arg(2) def min_max(space, args, implementation_of): if implementation_of == "max": compare = space.gt else: compare = space.lt + + args_w = args.arguments_w + if len(args_w) == 2 and not args.keywords: + # Unrollable case + w_max_item = None + for w_item in args_w: + if w_max_item is None or \ + space.is_true(compare(w_item, w_max_item)): + w_max_item = w_item + return w_max_item + else: + return min_max_loop(space, args, implementation_of) + + at specialize.arg(2) +def min_max_loop(space, args, implementation_of): + if implementation_of == "max": + compare = space.gt + else: + compare = space.lt args_w = args.arguments_w if len(args_w) > 1: w_sequence = space.newtuple(args_w) Modified: pypy/branch/fast-forward/pypy/module/__builtin__/test/test_minmax.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/test/test_minmax.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/test/test_minmax.py Mon Sep 20 13:19:51 2010 @@ -51,3 +51,37 @@ def test_max_empty(self): raises(ValueError, max, []) + +class AppTestMaxTuple: + + def test_max_usual(self): + assert max((1, 2, 3)) == 3 + + def test_max_floats(self): + assert max((0.1, 2.7, 14.7)) == 14.7 + + def test_max_chars(self): + assert max(('a', 'b', 'c')) == 'c' + + def test_max_strings(self): + assert max(('aaa', 'bbb', 'c')) == 'c' + + def test_max_mixed(self): + assert max(('1', 2, 3, 'aa')) == 'aa' + +class AppTestMinList: + + def test_min_usual(self): + assert min([1, 2, 3]) == 1 + + def test_min_floats(self): + assert min([0.1, 2.7, 14.7]) == 0.1 + + def test_min_chars(self): + assert min(['a', 'b', 'c']) == 'a' + + def test_min_strings(self): + assert min(['aaa', 'bbb', 'c']) == 'aaa' + + def test_min_mixed(self): + assert min(['1', 2, 3, 'aa']) == 2 Modified: pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py Mon Sep 20 13:19:51 2010 @@ -60,8 +60,8 @@ cls.space = space def setup_method(self, method): - # https://connect.sigen-ca.si/index-en.html - ADDR = "connect.sigen-ca.si", 443 + # https://codespeak.net/ + ADDR = "codespeak.net", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket Modified: pypy/branch/fast-forward/pypy/module/array/interp_array.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/array/interp_array.py (original) +++ pypy/branch/fast-forward/pypy/module/array/interp_array.py Mon Sep 20 13:19:51 2010 @@ -528,12 +528,15 @@ def array_tostring__Array(space, self): cbuf = self.charbuf() - s = '' - i = 0 - while i < self.len * mytype.bytes: - s += cbuf[i] - i += 1 + s = ''.join([cbuf[i] for i in xrange(self.len * mytype.bytes)]) return self.space.wrap(s) +## +## s = '' +## i = 0 +## while i < self.len * mytype.bytes: +## s += cbuf[i] +## i += 1 +## return self.space.wrap(s) def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n): if not isinstance(w_f, W_File): Modified: pypy/branch/fast-forward/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/gc/__init__.py (original) +++ 
pypy/branch/fast-forward/pypy/module/gc/__init__.py Mon Sep 20 13:19:51 2010 @@ -10,13 +10,25 @@ 'collect': 'interp_gc.collect', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'estimate_heap_size': 'interp_gc.estimate_heap_size', 'garbage' : 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } def __init__(self, space, w_name): - ts = space.config.translation.type_system - if ts == 'ootype': - del self.interpleveldefs['dump_heap_stats'] + if (not space.config.translating or + space.config.translation.gctransformer == "framework"): + self.appleveldefs.update({ + 'dump_rpy_heap': 'app_referents.dump_rpy_heap', + }) + self.interpleveldefs.update({ + 'get_rpy_roots': 'referents.get_rpy_roots', + 'get_rpy_referents': 'referents.get_rpy_referents', + 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', + 'get_rpy_type_index': 'referents.get_rpy_type_index', + 'get_objects': 'referents.get_objects', + 'get_referents': 'referents.get_referents', + 'get_referrers': 'referents.get_referrers', + '_dump_rpy_heap': 'referents._dump_rpy_heap', + 'GcRef': 'referents.W_GcRef', + }) MixedModule.__init__(self, space, w_name) Modified: pypy/branch/fast-forward/pypy/module/gc/interp_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/gc/interp_gc.py (original) +++ pypy/branch/fast-forward/pypy/module/gc/interp_gc.py Mon Sep 20 13:19:51 2010 @@ -24,36 +24,6 @@ # ____________________________________________________________ -import sys -platform = sys.platform - -def estimate_heap_size(space): - # XXX should be done with the help of the GCs - if platform == "linux2": - import os - pid = os.getpid() - try: - fd = os.open("/proc/" + str(pid) + "/status", os.O_RDONLY, 0777) - except OSError: - pass - else: - try: - content = os.read(fd, 1000000) - finally: - os.close(fd) - lines = content.split("\n") - for line in lines: - if line.startswith("VmSize:"): - start = line.find(" ") # try to ignore tabs - assert start > 0 - stop = len(line) - 3 - assert stop > 0 - result = int(line[start:stop].strip(" ")) * 1024 - return space.wrap(result) - raise OperationError(space.w_RuntimeError, - space.wrap("can't estimate the heap size")) -estimate_heap_size.unwrap_spec = [ObjSpace] - def dump_heap_stats(space, filename): tb = rgc._heap_stats() if not tb: Modified: pypy/branch/fast-forward/pypy/module/gc/test/test_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/gc/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/module/gc/test/test_gc.py Mon Sep 20 13:19:51 2010 @@ -59,13 +59,6 @@ raises(ValueError, gc.enable_finalizers) runtest(True) - def test_estimate_heap_size(self): - import sys, gc - if sys.platform == "linux2": - assert gc.estimate_heap_size() > 1024 - else: - raises(RuntimeError, gc.estimate_heap_size) - def test_enable(self): import gc assert gc.isenabled() Modified: pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py Mon Sep 20 13:19:51 2010 @@ -762,6 +762,8 @@ else: n = 215 + print + print 'Test:', e1, e2, n, res self.run_source(''' class tst: pass @@ -779,6 +781,25 @@ return sa '''%(e1, e2), n, ([], res)) + def 
test_boolrewrite_ptr_single(self): + self.run_source(''' + class tst: + pass + def main(): + a = tst() + b = tst() + c = tst() + sa = 0 + for i in range(1000): + if a == b: sa += 1 + else: sa += 2 + if a != b: sa += 10000 + else: sa += 20000 + if i > 750: a = b + return sa + ''', 215, ([], 12481752)) + assert False + def test_array_sum(self): for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): res = 19352859 @@ -1059,7 +1080,38 @@ ''', 170, ([], 1239690.0)) - + def test_min_max(self): + self.run_source(''' + def main(): + i=0 + sa=0 + while i < 2000: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + ''', 51, ([], 2000*3000)) + + def test_silly_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(*range(i)) + i+=1 + return sa + ''', 125, ([], 1997001)) + + def test_iter_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(range(i)) + i+=1 + return sa + ''', 88, ([], 1997001)) # test_circular Modified: pypy/branch/fast-forward/pypy/module/select/interp_select.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/select/interp_select.py (original) +++ pypy/branch/fast-forward/pypy/module/select/interp_select.py Mon Sep 20 13:19:51 2010 @@ -54,14 +54,11 @@ if space.is_w(w_timeout, space.w_None): timeout = -1 else: - # rationale for computing directly integer, instead - # of float + math.cell is that - # we have for free overflow check and noone really - # cares (since CPython does not try too hard to have - # a ceiling of value) + # we want to be compatible with cpython and also accept things + # that can be casted to integer (I think) try: # compute the integer - timeout = space.int_w(w_timeout) + timeout = space.int_w(space.int(w_timeout)) except (OverflowError, ValueError): raise OperationError(space.w_ValueError, space.wrap("math range error")) Modified: pypy/branch/fast-forward/pypy/module/select/test/test_select.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/select/test/test_select.py (original) +++ pypy/branch/fast-forward/pypy/module/select/test/test_select.py Mon Sep 20 13:19:51 2010 @@ -210,6 +210,14 @@ assert len(res[2]) == 0 assert res[0][0] == res[1][0] + def test_poll(self): + import select + class A(object): + def __int__(self): + return 3 + + select.poll().poll(A()) # assert did not crash + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" def setup_class(cls): @@ -275,4 +283,3 @@ s1, addr2 = cls.sock.accept() return s1, s2 - Modified: pypy/branch/fast-forward/pypy/module/sys/version.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/version.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/version.py Mon Sep 20 13:19:51 2010 @@ -4,10 +4,11 @@ import os -CPYTHON_VERSION = (2, 7, 0, "final", 42) -CPYTHON_API_VERSION = 1012 +#XXX # the release serial 42 is not in range(16) +CPYTHON_VERSION = (2, 7, 0, "final", 42) #XXX # sync patchlevel.h +CPYTHON_API_VERSION = 1012 #XXX # sync with include/modsupport.h -PYPY_VERSION = (1, 3, 0, "beta", '?') +PYPY_VERSION = (1, 3, 0, "beta", '?') #XXX # sync patchlevel.h # the last item is replaced by the svn revision ^^^ TRIM_URL_UP_TO = 'svn/pypy/' Modified: pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py Mon Sep 20 13:19:51 2010 @@ -32,11 +32,13 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', - 'netpacket/packet.h', - 'sys/ioctl.h', - 'net/if.h', ) - cond_includes = [('AF_NETLINK', 'linux/netlink.h')] + + cond_includes = [('AF_NETLINK', 'linux/netlink.h'), + ('AF_PACKET', 'netpacket/packet.h'), + ('AF_PACKET', 'sys/ioctl.h'), + ('AF_PACKET', 'net/if.h')] + libraries = () calling_conv = 'c' HEADER = ''.join(['#include <%s>\n' % filename for filename in includes]) Modified: pypy/branch/fast-forward/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rarithmetic.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rarithmetic.py Mon Sep 20 13:19:51 2010 @@ -50,6 +50,11 @@ LONG_MASK = _Ltest*2-1 LONG_TEST = _Ltest +LONG_BIT_SHIFT = 0 +while (1 << LONG_BIT_SHIFT) != LONG_BIT: + LONG_BIT_SHIFT += 1 + assert LONG_BIT_SHIFT < 99, "LONG_BIT_SHIFT value not found?" + INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY Modified: pypy/branch/fast-forward/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rgc.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rgc.py Mon Sep 20 13:19:51 2010 @@ -1,6 +1,7 @@ -import gc +import gc, types from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rlib.objectmodel import we_are_translated +from pypy.rpython.lltypesystem import lltype, llmemory # ____________________________________________________________ # General GC features @@ -93,7 +94,7 @@ def specialize_call(self, hop): from pypy.rpython.error import TyperError - from pypy.rpython.lltypesystem import lltype, llmemory, rtuple + from pypy.rpython.lltypesystem import rtuple from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR @@ -150,7 +151,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() args_v = [] if len(hop.args_s) == 1: @@ -165,7 +165,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype [v_nbytes] = hop.inputargs(lltype.Signed) hop.exception_cannot_occur() return hop.genop('gc_set_max_heap_size', [v_nbytes], @@ -182,7 +181,6 @@ return annmodel.SomeBool() def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() return hop.genop('gc_can_move', hop.args_v, resulttype=hop.r_result) @@ -195,11 +193,9 @@ def compute_result_annotation(self): from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.lltypesystem import lltype return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP hop.exception_is_here() return hop.genop('gc_heap_stats', [], resulttype=hop.r_result) @@ -209,7 +205,6 @@ When running directly, will pretend that gc is always moving (might be configurable in a future) """ - from pypy.rpython.lltypesystem import lltype return lltype.nullptr(TP) class MallocNonMovingEntry(ExtRegistryEntry): @@ -221,7 +216,6 @@ return malloc(s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): - from pypy.rpython.lltypesystem import lltype # XXX assume flavor and zero 
to be None by now assert hop.args_s[0].is_constant() vlist = [hop.inputarg(lltype.Void, arg=0)] @@ -243,7 +237,6 @@ def ll_arraycopy(source, dest, source_start, dest_start, length): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here # supports non-overlapping copies only @@ -279,7 +272,6 @@ def ll_shrink_array(p, smallerlength): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here if llop.shrink_array(lltype.Bool, p, smallerlength): @@ -313,3 +305,221 @@ func._dont_inline_ = True func._gc_no_collect_ = True return func + +# ____________________________________________________________ + +def get_rpy_roots(): + "NOT_RPYTHON" + # Return the 'roots' from the GC. + # This stub is not usable on top of CPython. + # The gc typically returns a list that ends with a few NULL_GCREFs. + raise NotImplementedError + +def get_rpy_referents(gcref): + "NOT_RPYTHON" + x = gcref._x + if isinstance(x, list): + d = x + elif isinstance(x, dict): + d = x.keys() + x.values() + else: + d = [] + if hasattr(x, '__dict__'): + d = x.__dict__.values() + if hasattr(type(x), '__slots__'): + for slot in type(x).__slots__: + try: + d.append(getattr(x, slot)) + except AttributeError: + pass + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in d if _keep_object(x)] + +def _keep_object(x): + if isinstance(x, type) or type(x) is types.ClassType: + return False # don't keep any type + if isinstance(x, (list, dict, str)): + return True # keep lists and dicts and strings + try: + return not x._freeze_() # don't keep any frozen object + except AttributeError: + return type(x).__module__ != '__builtin__' # keep non-builtins + except Exception: + return False # don't keep objects whose _freeze_() method explodes + +def get_rpy_memory_usage(gcref): + "NOT_RPYTHON" + # approximate implementation using CPython's type info + Class = type(gcref._x) + size = Class.__basicsize__ + if Class.__itemsize__ > 0: + size += Class.__itemsize__ * len(gcref._x) + return size + +def get_rpy_type_index(gcref): + "NOT_RPYTHON" + from pypy.rlib.rarithmetic import intmask + Class = gcref._x.__class__ + return intmask(id(Class)) + +def cast_gcref_to_int(gcref): + if we_are_translated(): + return lltype.cast_ptr_to_int(gcref) + else: + return id(gcref._x) + +def dump_rpy_heap(fd): + "NOT_RPYTHON" + raise NotImplementedError + +NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) + +class _GcRef(object): + # implementation-specific: there should not be any after translation + __slots__ = ['_x'] + def __init__(self, x): + self._x = x + def __hash__(self): + return object.__hash__(self._x) + def __eq__(self, other): + if isinstance(other, lltype._ptr): + assert other == NULL_GCREF, ( + "comparing a _GcRef with a non-NULL lltype ptr") + return False + assert isinstance(other, _GcRef) + return self._x is other._x + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): + return "_GcRef(%r)" % (self._x, ) + def _freeze_(self): + raise Exception("instances of rlib.rgc._GcRef cannot be translated") + +def cast_instance_to_gcref(x): + # Before translation, casts an RPython instance into a _GcRef. + # After translation, it is a variant of cast_object_to_ptr(GCREF). 
+ if we_are_translated(): + from pypy.rpython import annlowlevel + x = annlowlevel.cast_instance_to_base_ptr(x) + return lltype.cast_opaque_ptr(llmemory.GCREF, x) + else: + return _GcRef(x) +cast_instance_to_gcref._annspecialcase_ = 'specialize:argtype(0)' + +def try_cast_gcref_to_instance(Class, gcref): + # Before translation, unwraps the RPython instance contained in a _GcRef. + # After translation, it is a type-check performed by the GC. + if we_are_translated(): + from pypy.rpython.annlowlevel import base_ptr_lltype + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.lltypesystem import rclass + if _is_rpy_instance(gcref): + objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref) + if objptr.typeptr: # may be NULL, e.g. in rdict's dummykeyobj + clsptr = _get_llcls_from_cls(Class) + if rclass.ll_isinstance(objptr, clsptr): + return cast_base_ptr_to_instance(Class, objptr) + return None + else: + if isinstance(gcref._x, Class): + return gcref._x + return None +try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' + +# ------------------- implementation ------------------- + +_cache_s_list_of_gcrefs = None + +def s_list_of_gcrefs(): + global _cache_s_list_of_gcrefs + if _cache_s_list_of_gcrefs is None: + from pypy.annotation import model as annmodel + from pypy.annotation.listdef import ListDef + s_gcref = annmodel.SomePtr(llmemory.GCREF) + _cache_s_list_of_gcrefs = annmodel.SomeList( + ListDef(None, s_gcref, mutated=True, resized=False)) + return _cache_s_list_of_gcrefs + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_roots + def compute_result_annotation(self): + return s_list_of_gcrefs() + def specialize_call(self, hop): + return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + return s_list_of_gcrefs() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_referents', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_memory_usage + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_memory_usage', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_type_index + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_type_index', vlist, + resulttype = hop.r_result) + +def _is_rpy_instance(gcref): + "NOT_RPYTHON" + raise NotImplementedError + +def _get_llcls_from_cls(Class): + "NOT_RPYTHON" + raise NotImplementedError + +class Entry(ExtRegistryEntry): + _about_ = _is_rpy_instance + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeBool() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_is_rpy_instance', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = _get_llcls_from_cls + def compute_result_annotation(self, s_Class): + from pypy.annotation import model as annmodel + from 
pypy.rpython.lltypesystem import rclass + assert s_Class.is_constant() + return annmodel.SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): + from pypy.rpython.rclass import getclassrepr + from pypy.objspace.flow.model import Constant + from pypy.rpython.lltypesystem import rclass + Class = hop.args_s[0].const + classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class) + classrepr = getclassrepr(hop.rtyper, classdef) + vtable = classrepr.getvtable() + assert lltype.typeOf(vtable) == rclass.CLASSTYPE + return Constant(vtable, concretetype=rclass.CLASSTYPE) + +class Entry(ExtRegistryEntry): + _about_ = dump_rpy_heap + def compute_result_annotation(self, s_fd): + from pypy.annotation.model import s_Bool + return s_Bool + def specialize_call(self, hop): + vlist = hop.inputargs(lltype.Signed) + hop.exception_is_here() + return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result) Modified: pypy/branch/fast-forward/pypy/rlib/rstring.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstring.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstring.py Mon Sep 20 13:19:51 2010 @@ -46,7 +46,9 @@ # -------------- public API --------------------------------- -INIT_SIZE = 100 # XXX tweak +# the following number is the maximum size of an RPython unicode +# string that goes into the nursery of the minimark GC. +INIT_SIZE = 56 class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rwin32.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rwin32.py Mon Sep 20 13:19:51 2010 @@ -82,6 +82,8 @@ if WIN32: HANDLE = rffi.COpaquePtr(typedef='HANDLE') + assert rffi.cast(HANDLE, -1) == rffi.cast(HANDLE, -1) + LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE NULL_HANDLE = rffi.cast(HANDLE, 0) Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_rgc.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_rgc.py Mon Sep 20 13:19:51 2010 @@ -16,7 +16,7 @@ assert len(op.args) == 0 res = interpret(f, []) - + assert res is None def test_collect_0(): @@ -31,13 +31,13 @@ assert len(ops) == 1 op = ops[0][1] assert op.opname == 'gc__collect' - assert len(op.args) == 1 + assert len(op.args) == 1 assert op.args[0].value == 0 res = interpret(f, []) - - assert res is None - + + assert res is None + def test_can_move(): T0 = lltype.GcStruct('T') T1 = lltype.GcArray(lltype.Float) @@ -53,9 +53,9 @@ assert len(res) == 2 res = interpret(f, [1]) - + assert res == True - + def test_ll_arraycopy_1(): TYPE = lltype.GcArray(lltype.Signed) a1 = lltype.malloc(TYPE, 10) @@ -153,3 +153,21 @@ assert len(s2.vars) == 3 for i in range(3): assert s2.vars[i] == 50 + i + +def test_get_referents(): + class X(object): + __slots__ = ['stuff'] + x1 = X() + x1.stuff = X() + x2 = X() + lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1)) + lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] + assert x1.stuff in lst2 + assert x2 not in lst2 + +def test_get_memory_usage(): + class X(object): + pass + x1 = X() + n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) + assert n >= 8 and n <= 64 Modified: pypy/branch/fast-forward/pypy/rpython/llinterp.py 
============================================================================== --- pypy/branch/fast-forward/pypy/rpython/llinterp.py (original) +++ pypy/branch/fast-forward/pypy/rpython/llinterp.py Mon Sep 20 13:19:51 2010 @@ -650,7 +650,7 @@ offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] inneraddr, FIELD = self.getinneraddr(obj, *offsets) if FIELD is not lltype.Void: - self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue) + self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets) def op_bare_setinteriorfield(self, obj, *fieldnamesval): offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] @@ -916,6 +916,24 @@ def op_gc_get_type_info_group(self): raise NotImplementedError("gc_get_type_info_group") + def op_gc_get_rpy_memory_usage(self): + raise NotImplementedError("gc_get_rpy_memory_usage") + + def op_gc_get_rpy_roots(self): + raise NotImplementedError("gc_get_rpy_roots") + + def op_gc_get_rpy_referents(self): + raise NotImplementedError("gc_get_rpy_referents") + + def op_gc_is_rpy_instance(self): + raise NotImplementedError("gc_is_rpy_instance") + + def op_gc_get_rpy_type_index(self): + raise NotImplementedError("gc_get_rpy_type_index") + + def op_gc_dump_rpy_heap(self): + raise NotImplementedError("gc_dump_rpy_heap") + def op_do_malloc_fixedsize_clear(self): raise NotImplementedError("do_malloc_fixedsize_clear") @@ -925,6 +943,9 @@ def op_get_write_barrier_failing_case(self): raise NotImplementedError("get_write_barrier_failing_case") + def op_get_write_barrier_from_array_failing_case(self): + raise NotImplementedError("get_write_barrier_from_array_failing_case") + def op_yield_current_frame_to_caller(self): raise NotImplementedError("yield_current_frame_to_caller") Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py Mon Sep 20 13:19:51 2010 @@ -26,9 +26,6 @@ from pypy.translator.platform import platform from array import array -def uaddressof(obj): - return fixid(ctypes.addressof(obj)) - _ctypes_cache = {} _eci_cache = {} @@ -251,7 +248,7 @@ else: n = None cstruct = cls._malloc(n) - add_storage(container, _struct_mixin, cstruct) + add_storage(container, _struct_mixin, ctypes.pointer(cstruct)) for field_name in STRUCT._names: FIELDTYPE = getattr(STRUCT, field_name) field_value = getattr(container, field_name) @@ -264,8 +261,6 @@ if isinstance(FIELDTYPE, lltype.Struct): csubstruct = getattr(cstruct, field_name) convert_struct(field_value, csubstruct) - subcontainer = getattr(container, field_name) - substorage = subcontainer._storage elif field_name == STRUCT._arrayfld: # inlined var-sized part csubarray = getattr(cstruct, field_name) convert_array(field_value, csubarray) @@ -292,7 +287,7 @@ # regular case: allocate a new ctypes array of the proper type cls = get_ctypes_type(ARRAY) carray = cls._malloc(container.getlength()) - add_storage(container, _array_mixin, carray) + add_storage(container, _array_mixin, ctypes.pointer(carray)) if not isinstance(ARRAY.OF, lltype.ContainerType): # fish that we have enough space ctypes_array = ctypes.cast(carray.items, @@ -321,13 +316,15 @@ if isinstance(FIELDTYPE, lltype.ContainerType): if isinstance(FIELDTYPE, lltype.Struct): struct_container = getattr(container, field_name) - struct_storage = getattr(ctypes_storage, field_name) + struct_storage = ctypes.pointer( + 
getattr(ctypes_storage.contents, field_name)) struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): assert FIELDTYPE._hints.get('nolength', False) == False arraycontainer = _array_of_known_length(FIELDTYPE) - arraycontainer._storage = getattr(ctypes_storage, field_name) + arraycontainer._storage = ctypes.pointer( + getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) object.__setattr__(container, field_name, arraycontainer) else: @@ -352,6 +349,8 @@ def add_storage(instance, mixin_cls, ctypes_storage): """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" + # _storage is a ctypes pointer to a structure + # except for Opaque objects which use a c_void_p. assert not isinstance(instance, _parentable_mixin) # not yet subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls @@ -365,17 +364,23 @@ __slots__ = () def _ctypes_storage_was_allocated(self): - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value if addr in ALLOCATED: raise Exception("internal ll2ctypes error - " "double conversion from lltype to ctypes?") # XXX don't store here immortal structures ALLOCATED[addr] = self + def _addressof_storage(self): + "Returns the storage address as an int" + if self._storage is None or self._storage is True: + raise ValueError("Not a ctypes allocated structure") + return ctypes.cast(self._storage, ctypes.c_void_p).value + def _free(self): self._check() # no double-frees # allow the ctypes object to go away now - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value try: del ALLOCATED[addr] except KeyError: @@ -393,16 +398,16 @@ raise RuntimeError("pointer comparison with a freed structure") if other._storage is True: return False # the other container is not ctypes-based - addressof_other = ctypes.addressof(other._storage) - # both containers are ctypes-based, compare by address - return (ctypes.addressof(self._storage) == addressof_other) + addressof_other = other._addressof_storage() + # both containers are ctypes-based, compare the addresses + return self._addressof_storage() == addressof_other def __ne__(self, other): return not (self == other) def __hash__(self): if self._storage is not None: - return ctypes.addressof(self._storage) + return self._addressof_storage() else: return object.__hash__(self) @@ -411,7 +416,7 @@ return '' % (self._TYPE,) else: return '' % (self._TYPE, - uaddressof(self._storage),) + fixid(self._addressof_storage())) def __str__(self): return repr(self) @@ -422,7 +427,7 @@ def __getattr__(self, field_name): T = getattr(self._TYPE, field_name) - cobj = getattr(self._storage, field_name) + cobj = getattr(self._storage.contents, field_name) return ctypes2lltype(T, cobj) def __setattr__(self, field_name, value): @@ -430,17 +435,17 @@ object.__setattr__(self, field_name, value) # '_xxx' attributes else: cobj = lltype2ctypes(value) - setattr(self._storage, field_name, cobj) + setattr(self._storage.contents, field_name, cobj) class _array_mixin(_parentable_mixin): """Mixin added to _array containers when they become ctypes-based.""" __slots__ = () def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index) + return self._storage.contents._getitem(index) def setitem(self, index, value): - 
self._storage._setitem(index, value) + self._storage.contents._setitem(index, value) class _array_of_unknown_length(_parentable_mixin, lltype._parentable): _kind = "array" @@ -451,10 +456,10 @@ return 0, sys.maxint def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index, boundscheck=False) + return self._storage.contents._getitem(index, boundscheck=False) def setitem(self, index, value): - self._storage._setitem(index, value, boundscheck=False) + self._storage.contents._setitem(index, value, boundscheck=False) def getitems(self): if self._TYPE.OF != lltype.Char: @@ -476,7 +481,7 @@ __slots__ = () def getlength(self): - return self._storage.length + return self._storage.contents.length def getbounds(self): return 0, self.getlength() @@ -653,17 +658,18 @@ container._ctypes_storage_was_allocated() if isinstance(T.TO, lltype.OpaqueType): - return container._storage + return container._storage.value storage = container._storage - p = ctypes.pointer(storage) + p = storage if index: p = ctypes.cast(p, ctypes.c_void_p) p = ctypes.c_void_p(p.value + index) c_tp = get_ctypes_type(T.TO) - storage._normalized_ctype = c_tp - if normalize and hasattr(storage, '_normalized_ctype'): - p = ctypes.cast(p, ctypes.POINTER(storage._normalized_ctype)) + storage.contents._normalized_ctype = c_tp + if normalize and hasattr(storage.contents, '_normalized_ctype'): + normalized_ctype = storage.contents._normalized_ctype + p = ctypes.cast(p, ctypes.POINTER(normalized_ctype)) if lltype.typeOf(llobj) == llmemory.GCREF: p = ctypes.cast(p, ctypes.c_void_p) return p @@ -707,13 +713,13 @@ cobjheader = ctypes.cast(cobj, get_ctypes_type(lltype.Ptr(OBJECT))) struct_use_ctypes_storage(containerheader, - cobjheader.contents) + cobjheader) REAL_TYPE = get_rtyper().get_type_for_typeptr( containerheader.typeptr) REAL_T = lltype.Ptr(REAL_TYPE) cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T)) container = lltype._struct(REAL_TYPE) - struct_use_ctypes_storage(container, cobj.contents) + struct_use_ctypes_storage(container, cobj) if REAL_TYPE != T.TO: p = container._as_ptr() container = lltype.cast_pointer(T, p)._as_obj() @@ -728,10 +734,10 @@ elif isinstance(T.TO, lltype.Array): if T.TO._hints.get('nolength', False): container = _array_of_unknown_length(T.TO) - container._storage = cobj.contents + container._storage = cobj else: container = _array_of_known_length(T.TO) - container._storage = cobj.contents + container._storage = cobj elif isinstance(T.TO, lltype.FuncType): cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: @@ -745,7 +751,8 @@ container = _llgcopaque(cobj) else: container = lltype._opaque(T.TO) - container._storage = ctypes.cast(cobj, ctypes.c_void_p) + cbuf = ctypes.cast(cobj, ctypes.c_void_p) + add_storage(container, _parentable_mixin, cbuf) else: raise NotImplementedError(T) llobj = lltype._ptr(T, container, solid=True) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py Mon Sep 20 13:19:51 2010 @@ -16,8 +16,11 @@ class Arena(object): object_arena_location = {} # {container: (arena, offset)} old_object_arena_location = weakref.WeakKeyDictionary() + _count_arenas = 0 def __init__(self, nbytes, zero): + Arena._count_arenas += 1 + self._arena_index = Arena._count_arenas self.nbytes = nbytes self.usagemap = 
array.array('c') self.objectptrs = {} # {offset: ptr-to-container} @@ -25,6 +28,9 @@ self.freed = False self.reset(zero) + def __repr__(self): + return '' % (self._arena_index, self.nbytes) + def reset(self, zero, start=0, size=None): self.check() if size is None: @@ -40,7 +46,7 @@ assert offset >= stop, "object overlaps cleared area" else: obj = ptr._obj - del Arena.object_arena_location[obj] + _dictdel(Arena.object_arena_location, obj) del self.objectptrs[offset] del self.objectsizes[offset] obj._free() @@ -63,7 +69,7 @@ raise ArenaError("Address offset is outside the arena") return fakearenaaddress(self, offset) - def allocate_object(self, offset, size): + def allocate_object(self, offset, size, letter='x'): self.check() bytes = llmemory.raw_malloc_usage(size) if offset + bytes > self.nbytes: @@ -78,7 +84,7 @@ raise ArenaError("new object overlaps a previous object") assert offset not in self.objectptrs addr2 = size._raw_malloc([], zero=zero) - pattern = 'X' + 'x'*(bytes-1) + pattern = letter.upper() + letter*(bytes-1) self.usagemap[offset:offset+bytes] = array.array('c', pattern) self.setobject(addr2, offset, bytes) # common case: 'size' starts with a GCHeaderOffset. In this case @@ -252,6 +258,16 @@ raise RuntimeError(msg % (obj,)) return arena.getaddr(offset) +def _dictdel(d, key): + # hack + try: + del d[key] + except KeyError: + items = d.items() + d.clear() + d.update(items) + del d[key] + class RoundedUpForAllocation(llmemory.AddressOffset): """A size that is rounded up in order to preserve alignment of objects following it. For arenas containing heterogenous objects. @@ -297,6 +313,7 @@ assert isinstance(arena_addr, fakearenaaddress) assert arena_addr.offset == 0 arena_addr.arena.reset(False) + assert not arena_addr.arena.objectptrs arena_addr.arena.freed = True def arena_reset(arena_addr, size, zero): @@ -317,10 +334,13 @@ this is used to know what type of lltype object to allocate.""" from pypy.rpython.memory.lltypelayout import memory_alignment addr = getfakearenaaddress(addr) - if check_alignment and (addr.offset & (memory_alignment-1)) != 0: + letter = 'x' + if llmemory.raw_malloc_usage(size) == 1: + letter = 'b' # for Byte-aligned allocations + elif check_alignment and (addr.offset & (memory_alignment-1)) != 0: raise ArenaError("object at offset %d would not be correctly aligned" % (addr.offset,)) - addr.arena.allocate_object(addr.offset, size) + addr.arena.allocate_object(addr.offset, size, letter) def arena_shrink_obj(addr, newsize): """ Mark object as shorter than it was @@ -357,6 +377,11 @@ # This only works with linux's madvise(), which is really not a memory # usage hint but a real command. It guarantees that after MADV_DONTNEED # the pages are cleared again. + + # Note that the trick of the general 'posix' section below, i.e. + # reading /dev/zero, does not seem to have the correct effect of + # lazily-allocating pages on all Linux systems. + from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo _eci = ExternalCompilationInfo(includes=['sys/mman.h']) @@ -459,6 +484,7 @@ sandboxsafe=True) def llimpl_arena_free(arena_addr): + # NB. minimark.py assumes that arena_free() is actually just a raw_free(). 
llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', llimpl=llimpl_arena_free, Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llheap.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llheap.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llheap.py Mon Sep 20 13:19:51 2010 @@ -8,7 +8,8 @@ from pypy.rlib.rgc import collect from pypy.rlib.rgc import can_move -def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue): +def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=None): assert typeOf(newvalue) == INNERTYPE # xxx access the address object's ref() directly for performance inneraddr.ref()[0] = newvalue Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llmemory.py Mon Sep 20 13:19:51 2010 @@ -409,6 +409,9 @@ if self.ptr is None: s = 'NULL' else: + #try: + # s = hex(self.ptr._cast_to_int()) + #except: s = str(self.ptr) return '' % (s,) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/lloperation.py Mon Sep 20 13:19:51 2010 @@ -438,6 +438,7 @@ 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), 'do_malloc_varsize_clear': LLOp(canraise=(MemoryError,),canunwindgc=True), 'get_write_barrier_failing_case': LLOp(sideeffects=False), + 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False), 'gc_get_type_info_group': LLOp(sideeffects=False), # __________ GC operations __________ @@ -469,6 +470,13 @@ 'gc_writebarrier_before_copy': LLOp(canrun=True), 'gc_heap_stats' : LLOp(canunwindgc=True), + 'gc_get_rpy_roots' : LLOp(), + 'gc_get_rpy_referents': LLOp(), + 'gc_get_rpy_memory_usage': LLOp(), + 'gc_get_rpy_type_index': LLOp(), + 'gc_is_rpy_instance' : LLOp(), + 'gc_dump_rpy_heap' : LLOp(), + # ------- JIT & GC interaction, only for some GCs ---------- 'gc_adr_of_nursery_free' : LLOp(), Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Mon Sep 20 13:19:51 2010 @@ -353,6 +353,8 @@ assert tmppath.check(file=1) assert not ALLOCATED # detects memory leaks in the test + assert rffi.cast(FILEP, -1) == rffi.cast(FILEP, -1) + def test_simple_cast(self): assert rffi.cast(rffi.SIGNEDCHAR, 0x123456) == 0x56 assert rffi.cast(rffi.SIGNEDCHAR, 0x123481) == -127 Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py Mon Sep 20 13:19:51 2010 @@ -5,6 +5,7 @@ from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from 
pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage +from pypy.rlib.rarithmetic import r_uint TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed), ('size', lltype.Signed), @@ -53,7 +54,8 @@ varsize_offset_to_length, varsize_offsets_to_gcpointers_in_var_part, weakpointer_offset, - member_index): + member_index, + is_rpython_class): self.getfinalizer = getfinalizer self.is_varsize = is_varsize self.has_gcptr_in_varsize = has_gcptr_in_varsize @@ -66,6 +68,7 @@ self.varsize_offsets_to_gcpointers_in_var_part = varsize_offsets_to_gcpointers_in_var_part self.weakpointer_offset = weakpointer_offset self.member_index = member_index + self.is_rpython_class = is_rpython_class def get_member_index(self, type_id): return self.member_index(type_id) @@ -101,6 +104,9 @@ def get_size(self, obj): return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def get_size_incl_hash(self, obj): + return self.get_size(obj) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. @@ -146,7 +152,7 @@ return False def set_max_heap_size(self, size): - pass + raise NotImplementedError def x_swap_pool(self, newpool): return newpool @@ -194,6 +200,39 @@ length -= 1 trace._annspecialcase_ = 'specialize:arg(2)' + def trace_partial(self, obj, start, stop, callback, arg): + """Like trace(), but only walk the array part, for indices in + range(start, stop). Must only be called if has_gcptr_in_varsize(). + """ + length = stop - start + typeid = self.get_type_id(obj) + if self.is_gcarrayofgcptr(typeid): + # a performance shortcut for GcArray(gcptr) + item = obj + llmemory.gcarrayofptr_itemsoffset + item += llmemory.gcarrayofptr_singleitemoffset * start + while length > 0: + if self.points_to_valid_gc_object(item): + callback(item, arg) + item += llmemory.gcarrayofptr_singleitemoffset + length -= 1 + return + ll_assert(self.has_gcptr_in_varsize(typeid), + "trace_partial() on object without has_gcptr_in_varsize()") + item = obj + self.varsize_offset_to_variable_part(typeid) + offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) + itemlength = self.varsize_item_sizes(typeid) + item += itemlength * start + while length > 0: + j = 0 + while j < len(offsets): + itemobj = item + offsets[j] + if self.points_to_valid_gc_object(itemobj): + callback(itemobj, arg) + j += 1 + item += itemlength + length -= 1 + trace_partial._annspecialcase_ = 'specialize:arg(4)' + def points_to_valid_gc_object(self, addr): return self.is_valid_gc_object(addr.address[0]) @@ -340,6 +379,7 @@ "generation": "generation.GenerationGC", "hybrid": "hybrid.HybridGC", "markcompact" : "markcompact.MarkCompactGC", + "minimark" : "minimark.MiniMarkGC", } try: modulename, classname = classes[config.translation.gc].split('.') @@ -351,10 +391,12 @@ GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS -def read_from_env(varname): +def _read_float_and_factor_from_env(varname): import os value = os.environ.get(varname) if value: + if len(value) > 1 and value[-1] in 'bB': + value = value[:-1] realvalue = value[:-1] if value[-1] in 'kK': factor = 1024 @@ -366,7 +408,21 @@ factor = 1 realvalue = value try: - return int(float(realvalue) * factor) + return (float(realvalue), factor) except ValueError: pass - return -1 + return (0.0, 0) + +def read_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return int(value * factor) + +def read_uint_from_env(varname): + value, factor = 
_read_float_and_factor_from_env(varname) + return r_uint(value * factor) + +def read_float_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + if factor != 1: + return 0.0 + return value Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py Mon Sep 20 13:19:51 2010 @@ -449,7 +449,7 @@ # for the JIT: a minimal description of the write_barrier() method # (the JIT assumes it is of the shape - # "if newvalue.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py Mon Sep 20 13:19:51 2010 @@ -674,6 +674,13 @@ return llmemory.cast_adr_to_int(obj) # not in an arena... return adr - self.space + def get_size_incl_hash(self, obj): + size = self.get_size(obj) + hdr = self.header(obj) + if hdr.tid & GCFLAG_HASHFIELD: + size += llmemory.sizeof(lltype.Signed) + return size + # ____________________________________________________________ class CannotAllocateGCArena(Exception): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py Mon Sep 20 13:19:51 2010 @@ -95,7 +95,10 @@ if self.gc.needs_write_barrier: newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + if hasattr(self.gc, 'write_barrier_from_array'): + self.gc.write_barrier_from_array(newaddr, addr_struct, index) + else: + self.gc.write_barrier(newaddr, addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -326,6 +329,27 @@ self.gc.collect() assert hash == self.gc.identityhash(self.stackroots[-1]) self.stackroots.pop() + # (6) ask for the hash of varsized objects, larger and larger + for i in range(10): + self.gc.collect() + p = self.malloc(VAR, i) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() + + def test_memory_alignment(self): + A1 = lltype.GcArray(lltype.Char) + for i in range(50): + p1 = self.malloc(A1, i) + if i: + p1[i-1] = chr(i) + self.stackroots.append(p1) + self.gc.collect() + for i in range(1, 50): + p = self.stackroots[-50+i] + assert p[i-1] == chr(i) class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -456,3 +480,35 @@ def test_varsized_from_prebuilt_gc(self): DirectGCTest.test_varsized_from_prebuilt_gc(self) test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + +class TestMiniMarkGCSimple(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + from pypy.rpython.memory.gc.minimark import SimpleArenaCollection + # test the GC itself, providing a simple class for 
ArenaCollection + GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} + + def test_card_marker(self): + for arraylength in (range(4, 17) + + [69] # 3 bytes + + [300]): # 10 bytes + print 'array length:', arraylength + nums = {} + a = self.malloc(VAR, arraylength) + self.stackroots.append(a) + for i in range(50): + p = self.malloc(S) + p.x = -i + a = self.stackroots[-1] + index = (i*i) % arraylength + self.writearray(a, index, p) + nums[index] = p.x + # + for index, expected_x in nums.items(): + assert a[index].x == expected_x + self.stackroots.pop() + test_card_marker.GC_PARAMS = {"card_page_indices": 4, + "card_page_indices_min": 7} + +class TestMiniMarkGCFull(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py Mon Sep 20 13:19:51 2010 @@ -7,7 +7,7 @@ from pypy.rpython.memory.gc import marksweep from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib import rstack +from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc @@ -139,6 +139,8 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP + from pypy.rpython.memory.gc import inspect + super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): # for tests: the GC choice can be specified as class attributes @@ -180,6 +182,7 @@ gcdata.gc.set_root_walker(root_walker) self.num_pushs = 0 self.write_barrier_calls = 0 + self.write_barrier_from_array_calls = 0 def frameworkgc_setup(): # run-time initialization code @@ -388,11 +391,38 @@ else: self.id_ptr = None + self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + [s_gc], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + [s_gc, s_gcref], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + [s_gc, s_gcref], + annmodel.SomeBool(), + minimal_transform=False) + self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + [s_gc, annmodel.SomeInteger()], + annmodel.s_Bool, + minimal_transform=False) + self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + self.write_barrier_ptr = None + self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, @@ -408,8 +438,26 @@ [annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None) - else: - self.write_barrier_ptr = None + func = getattr(GCClass, 'write_barrier_from_array', None) + if func is not None: + self.write_barrier_from_array_ptr = getfn(func.im_func, + [s_gc, + annmodel.SomeAddress(), + annmodel.SomeAddress(), + 
annmodel.SomeInteger()], + annmodel.s_None, + inline=True) + func = getattr(gcdata.gc, 'remember_young_pointer_from_array', + None) + if func is not None: + # func should not be a bound method, but a real function + assert isinstance(func, types.FunctionType) + self.write_barrier_from_array_failing_case_ptr = \ + getfn(func, + [annmodel.SomeAddress(), + annmodel.SomeInteger(), + annmodel.SomeAddress()], + annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], annmodel.SomeInteger()) @@ -496,6 +544,9 @@ if self.write_barrier_ptr: log.info("inserted %s write barrier calls" % ( self.write_barrier_calls, )) + if self.write_barrier_from_array_ptr: + log.info("inserted %s write_barrier_from_array calls" % ( + self.write_barrier_from_array_calls, )) # XXX because we call inputconst already in replace_malloc, we can't # modify the instance, we have to modify the 'rtyped instance' @@ -766,6 +817,12 @@ [self.write_barrier_failing_case_ptr], resultvar=op.result) + def gct_get_write_barrier_from_array_failing_case(self, hop): + op = hop.spaceop + hop.genop("same_as", + [self.write_barrier_from_array_failing_case_ptr], + resultvar=op.result) + def gct_zero_gc_pointers_inside(self, hop): if not self.malloc_zero_filled: v_ob = hop.spaceop.args[0] @@ -883,6 +940,53 @@ def gct_gc_get_type_info_group(self, hop): return hop.cast_result(self.c_type_info_group) + def gct_gc_get_rpy_roots(self, hop): + livevars = self.push_roots(hop) + hop.genop("direct_call", + [self.get_rpy_roots_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_referents(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_referents_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_memory_usage(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_type_index(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_is_rpy_instance(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.is_rpy_instance_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_dump_rpy_heap(self, hop): + livevars = self.push_roots(hop) + [v_fd] = hop.spaceop.args + hop.genop("direct_call", + [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): @@ -897,6 +1001,15 @@ c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO)) return hop.cast_result(c) + def _set_into_gc_array_part(self, op): + if op.opname == 'setarrayitem': + return op.args[1] + if op.opname == 'setinteriorfield': + for v in op.args[1:-1]: + if v.concretetype is not lltype.Void: + return v + return None + def transform_generic_set(self, hop): from pypy.objspace.flow.model import Constant opname = hop.spaceop.opname @@ -910,15 +1023,26 @@ and not isinstance(v_newvalue, Constant) and 
v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - self.write_barrier_calls += 1 v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) - hop.genop("direct_call", [self.write_barrier_ptr, - self.c_const_gc, - v_newvalue, - v_structaddr]) + if (self.write_barrier_from_array_ptr is not None and + self._set_into_gc_array_part(hop.spaceop) is not None): + self.write_barrier_from_array_calls += 1 + v_index = self._set_into_gc_array_part(hop.spaceop) + assert v_index.concretetype == lltype.Signed + hop.genop("direct_call", [self.write_barrier_from_array_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr, + v_index]) + else: + self.write_barrier_calls += 1 + hop.genop("direct_call", [self.write_barrier_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr]) hop.rename('bare_' + opname) def transform_getfield_typeptr(self, hop): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gctypelayout.py Mon Sep 20 13:19:51 2010 @@ -101,6 +101,10 @@ infobits = self.get(typeid).infobits return infobits & T_MEMBER_INDEX + def q_is_rpython_class(self, typeid): + infobits = self.get(typeid).infobits + return infobits & T_IS_RPYTHON_INSTANCE != 0 + def set_query_functions(self, gc): gc.set_query_functions( self.q_is_varsize, @@ -114,7 +118,8 @@ self.q_varsize_offset_to_length, self.q_varsize_offsets_to_gcpointers_in_var_part, self.q_weakpointer_offset, - self.q_member_index) + self.q_member_index, + self.q_is_rpython_class) # the lowest 16bits are used to store group member index @@ -123,6 +128,7 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT T_KEY_MASK = intmask(0xFF000000) T_KEY_VALUE = intmask(0x7A000000) # bug detection only @@ -181,6 +187,8 @@ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF + if is_subclass_of_object(TYPE): + infobits |= T_IS_RPYTHON_INSTANCE info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -259,9 +267,7 @@ else: # no vtable from lltype2vtable -- double-check to be sure # that it's not a subclass of OBJECT. 
- while isinstance(TYPE, lltype.GcStruct): - assert TYPE is not rclass.OBJECT - _, TYPE = TYPE._first_struct() + assert not is_subclass_of_object(TYPE) def get_info(self, type_id): res = llop.get_group_member(GCData.TYPE_INFO_PTR, @@ -437,6 +443,13 @@ for i in range(p._obj.getlength()): zero_gc_pointers_inside(p[i], ITEM) +def is_subclass_of_object(TYPE): + while isinstance(TYPE, lltype.GcStruct): + if TYPE is rclass.OBJECT: + return True + _, TYPE = TYPE._first_struct() + return False + ########## weakrefs ########## # framework: weakref objects are small structures containing only an address Modified: pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py Mon Sep 20 13:19:51 2010 @@ -15,6 +15,8 @@ self.llinterp = llinterp self.prepare_graphs(flowgraphs) self.gc.setup() + self.has_write_barrier_from_array = hasattr(self.gc, + 'write_barrier_from_array') def prepare_graphs(self, flowgraphs): lltype2vtable = self.llinterp.typer.lltype2vtable @@ -78,13 +80,30 @@ ARRAY = lltype.typeOf(array).TO addr = llmemory.cast_ptr_to_adr(array) addr += llmemory.itemoffsetof(ARRAY, index) - self.setinterior(array, addr, ARRAY.OF, newitem) + self.setinterior(array, addr, ARRAY.OF, newitem, (index,)) - def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue): + def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=()): if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and isinstance(INNERTYPE, lltype.Ptr) and INNERTYPE.TO._gckind == 'gc'): - self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue), - llmemory.cast_ptr_to_adr(toplevelcontainer)) + # + wb = True + if self.has_write_barrier_from_array: + for index in offsets: + if type(index) is not str: + assert (type(index) is int # <- fast path + or lltype.typeOf(index) == lltype.Signed) + self.gc.write_barrier_from_array( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer), + index) + wb = False + break + # + if wb: + self.gc.write_barrier( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) def collect(self, *gen): Modified: pypy/branch/fast-forward/pypy/rpython/memory/lltypelayout.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/lltypelayout.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/lltypelayout.py Mon Sep 20 13:19:51 2010 @@ -7,7 +7,7 @@ primitive_to_fmt = {lltype.Signed: "l", lltype.Unsigned: "L", lltype.Char: "c", - lltype.UniChar: "H", # maybe + lltype.UniChar: "i", # 4 bytes lltype.Bool: "B", lltype.Float: "d", llmemory.Address: "P", Modified: pypy/branch/fast-forward/pypy/rpython/memory/support.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/support.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/support.py Mon Sep 20 13:19:51 2010 @@ -216,6 +216,24 @@ self.index_in_oldest = index + 1 return result + def foreach(self, callback, arg): + """Invoke 'callback(address, arg)' for all addresses in the deque. + Typically, 'callback' is a bound method and 'arg' can be None. 
+ """ + chunk = self.oldest_chunk + index = self.index_in_oldest + while chunk is not self.newest_chunk: + while index < chunk_size: + callback(chunk.items[index], arg) + index += 1 + chunk = chunk.next + index = 0 + limit = self.index_in_newest + while index < limit: + callback(chunk.items[index], arg) + index += 1 + foreach._annspecialcase_ = 'specialize:arg(1)' + def delete(self): cur = self.oldest_chunk while cur: Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py Mon Sep 20 13:19:51 2010 @@ -26,8 +26,9 @@ class GCTest(object): GC_PARAMS = {} GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -451,10 +452,10 @@ a = rgc.malloc_nonmovable(TP, 3) if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_malloc_nonmovable_fixsize(self): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -465,37 +466,36 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_shrink_array(self): from pypy.rpython.lltypesystem.rstr import STR - GC_CAN_SHRINK_ARRAY = self.GC_CAN_SHRINK_ARRAY - def f(n, m): + def f(n, m, gc_can_shrink_array): ptr = lltype.malloc(STR, n) ptr.hash = 0x62 ptr.chars[0] = 'A' ptr.chars[1] = 'B' ptr.chars[2] = 'C' ptr2 = rgc.ll_shrink_array(ptr, 2) - assert (ptr == ptr2) == GC_CAN_SHRINK_ARRAY + assert (ptr == ptr2) == gc_can_shrink_array rgc.collect() return ( ord(ptr2.chars[0]) + (ord(ptr2.chars[1]) << 8) + (len(ptr2.chars) << 16) + (ptr2.hash << 24)) - assert self.interpret(f, [3, 0]) == 0x62024241 - # don't test with larger numbers of top of the Hybrid GC, because - # the default settings make it a too-large varsized object that - # gets allocated outside the semispace - if not isinstance(self, TestHybridGC): - assert self.interpret(f, [12, 0]) == 0x62024241 + flag = self.GC_CAN_SHRINK_ARRAY + assert self.interpret(f, [3, 0, flag]) == 0x62024241 + # with larger numbers, it gets allocated outside the semispace + # with some GCs. 
+ flag = self.GC_CAN_SHRINK_BIG_ARRAY + assert self.interpret(f, [12, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -568,7 +568,7 @@ assert res == 111 def test_writebarrier_before_copy(self): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -628,8 +628,9 @@ class TestSemiSpaceGC(GCTest, snippet.SemiSpaceGCTests): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True + GC_CAN_SHRINK_BIG_ARRAY = True class TestGrowingSemiSpaceGC(TestSemiSpaceGC): GC_PARAMS = {'space_size': 16*WORD} @@ -641,16 +642,15 @@ from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass GC_PARAMS = {'space_size': 65536+16384} GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_BIG_ARRAY = False def test_ref_from_rawmalloced_to_regular(self): import gc @@ -720,7 +720,7 @@ from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass GC_CAN_MOVE = False # with this size of heap, stuff gets allocated # in 3rd gen. - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_PARAMS = {'space_size': 48*WORD, 'min_nursery_size': 12*WORD, 'nursery_size': 12*WORD, @@ -764,3 +764,13 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("Not supported") + + +class TestMiniMarkGC(TestSemiSpaceGC): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_CAN_SHRINK_BIG_ARRAY = False + GC_CAN_MALLOC_NONMOVABLE = True + +class TestMiniMarkGCCardMarking(TestMiniMarkGC): + GC_PARAMS = {'card_page_indices': 4, + 'card_page_indices_min': 10} Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_support.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_support.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_support.py Mon Sep 20 13:19:51 2010 @@ -113,6 +113,27 @@ deque.append(x) expected.append(x) + def test_foreach(self): + AddressDeque = get_address_deque(10) + ll = AddressDeque() + for num_entries in range(30, -1, -1): + addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) + for i in range(num_entries)] + for a in addrs: + ll.append(a) + + seen = [] + def callback(addr, fortytwo): + assert fortytwo == 42 + seen.append(addr) + + ll.foreach(callback, 42) + assert seen == addrs + for a in addrs: + b = ll.popleft() + assert a == b + assert not ll.non_empty() + def test_stack_annotate(): AddressStack = get_address_stack(60) Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py Mon Sep 20 13:19:51 2010 @@ -47,7 +47,7 @@ gcpolicy = None stacklessgc = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + 
GC_CAN_MALLOC_NONMOVABLE = True taggedpointers = False def setup_class(cls): @@ -242,6 +242,26 @@ heap_size = self.heap_usage(statistics) assert heap_size < 16000 * WORD / 4 # xxx + def define_llinterp_dict(self): + class A(object): + pass + def malloc_a_lot(): + i = 0 + while i < 10: + i += 1 + a = (1, 2, i) + b = {a: A()} + j = 0 + while j < 20: + j += 1 + b[1, j, i] = A() + return 0 + return malloc_a_lot + + def test_llinterp_dict(self): + run = self.runner("llinterp_dict") + run([]) + def skipdefine_global_list(cls): gl = [] class Box: @@ -602,8 +622,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 #except Exception, e: # return 2 @@ -611,7 +631,7 @@ def test_malloc_nonmovable(self): run = self.runner("malloc_nonmovable") - assert int(self.GC_CANNOT_MALLOC_NONMOVABLE) == run([]) + assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([]) def define_malloc_nonmovable_fixsize(cls): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -622,8 +642,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -631,7 +651,7 @@ def test_malloc_nonmovable_fixsize(self): run = self.runner("malloc_nonmovable_fixsize") - assert run([]) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE) def define_shrink_array(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -680,7 +700,8 @@ class GenericMovingGCTests(GenericGCTests): GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False + GC_CAN_TEST_ID = False def define_many_ids(cls): class A(object): @@ -710,7 +731,8 @@ return f def test_many_ids(self): - py.test.skip("fails for bad reasons in lltype.py :-(") + if not self.GC_CAN_TEST_ID: + py.test.skip("fails for bad reasons in lltype.py :-(") run = self.runner("many_ids") run([]) @@ -856,7 +878,7 @@ # (and give fixedsize) def define_writebarrier_before_copy(cls): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -1144,10 +1166,6 @@ GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") - class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1379,7 +1397,7 @@ class TestHybridGC(TestGenerationGC): gcname = "hybrid" - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): @@ -1444,6 +1462,23 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("not supported") + +class TestMiniMarkGC(TestHybridGC): + gcname = "minimark" + GC_CAN_TEST_ID = True + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_PARAMS = {'nursery_size': 32*WORD, + 'page_size': 16*WORD, + 'arena_size': 64*WORD, + 'small_request_threshold': 5*WORD, + 'card_page_indices': 4, + 'card_page_indices_min': 10, + } + root_stack_depth = 200 + # ________________________________________________________________ # tagged pointers Modified: pypy/branch/fast-forward/pypy/rpython/rptr.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rptr.py (original) +++ 
pypy/branch/fast-forward/pypy/rpython/rptr.py Mon Sep 20 13:19:51 2010 @@ -35,6 +35,9 @@ id = lltype.cast_ptr_to_int(p) return ll_str.ll_int2hex(r_uint(id), True) + def get_ll_eq_function(self): + return None + def rtype_getattr(self, hop): attr = hop.args_s[1].const if isinstance(hop.s_result, annmodel.SomeLLADTMeth): Modified: pypy/branch/fast-forward/pypy/translator/c/funcgen.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/funcgen.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/funcgen.py Mon Sep 20 13:19:51 2010 @@ -733,6 +733,8 @@ continue elif T == Signed: format.append('%ld') + elif T == Unsigned: + format.append('%lu') elif T == Float: format.append('%f') elif isinstance(T, Ptr) or T == Address: Modified: pypy/branch/fast-forward/pypy/translator/c/src/mem.h ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/src/mem.h (original) +++ pypy/branch/fast-forward/pypy/translator/c/src/mem.h Mon Sep 20 13:19:51 2010 @@ -224,3 +224,13 @@ #define OP_CAST_PTR_TO_WEAKREFPTR(x, r) r = x #define OP_CAST_WEAKREFPTR_TO_PTR(x, r) r = x + +/************************************************************/ +/* dummy version of these operations, e.g. with Boehm */ + +#define OP_GC_GET_RPY_ROOTS(r) r = 0 +#define OP_GC_GET_RPY_REFERENTS(x, r) r = 0 +#define OP_GC_GET_RPY_MEMORY_USAGE(x, r) r = -1 +#define OP_GC_GET_RPY_TYPE_INDEX(x, r) r = -1 +#define OP_GC_IS_RPY_INSTANCE(x, r) r = 0 +#define OP_GC_DUMP_RPY_HEAP(r) r = 0 Modified: pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/test/test_newgc.py Mon Sep 20 13:19:51 2010 @@ -2,7 +2,7 @@ import sys, os, inspect from pypy.objspace.flow.model import summary -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.test import snippet from pypy.rlib import rgc @@ -19,10 +19,11 @@ removetypeptr = False taggedpointers = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False _isolated_func = None + c_allfuncs = None @classmethod def _makefunc_str_int(cls, f): @@ -111,6 +112,7 @@ def teardown_class(cls): if hasattr(cls.c_allfuncs, 'close_isolate'): cls.c_allfuncs.close_isolate() + cls.c_allfuncs = None def run(self, name, *args): if not args: @@ -690,8 +692,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -699,7 +701,7 @@ def test_malloc_nonmovable(self): res = self.run('malloc_nonmovable') - assert res == self.GC_CANNOT_MALLOC_NONMOVABLE + assert res == self.GC_CAN_MALLOC_NONMOVABLE def define_resizable_buffer(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -891,12 +893,208 @@ def test_arraycopy_writebarrier_ptr(self): self.run("arraycopy_writebarrier_ptr") + def define_get_rpy_roots(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def g(s): + lst = rgc.get_rpy_roots() + found = False + for x in lst: + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s): + found = True + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s.u): + os.write(2, "s.u should 
not be found!\n") + assert False + return found == 1 + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + found = g(s) + if not found: + os.write(2, "not found!\n") + assert False + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_roots(self): + self.run("get_rpy_roots") + + def define_get_rpy_referents(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + lst = rgc.get_rpy_referents(gcref1) + assert gcref2 in lst + assert gcref1 not in lst + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_referents(self): + self.run("get_rpy_referents") + + def define_is_rpy_instance(self): + class Foo: + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def check(gcref, expected): + result = rgc._is_rpy_instance(gcref) + assert result == expected + + def fn(): + s = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + check(gcref1, False) + + f = Foo() + gcref3 = rgc.cast_instance_to_gcref(f) + check(gcref3, True) + + return 0 + + return fn + + def test_is_rpy_instance(self): + self.run("is_rpy_instance") + + def define_try_cast_gcref_to_instance(self): + class Foo: + pass + class FooBar(Foo): + pass + class Biz(object): + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def fn(): + foo = Foo() + gcref1 = rgc.cast_instance_to_gcref(foo) + assert rgc.try_cast_gcref_to_instance(Foo, gcref1) is foo + assert rgc.try_cast_gcref_to_instance(FooBar, gcref1) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref1) is None + + foobar = FooBar() + gcref2 = rgc.cast_instance_to_gcref(foobar) + assert rgc.try_cast_gcref_to_instance(Foo, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(FooBar, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(Biz, gcref2) is None + + s = lltype.malloc(S) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + assert rgc.try_cast_gcref_to_instance(Foo, gcref3) is None + assert rgc.try_cast_gcref_to_instance(FooBar, gcref3) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref3) is None + + return 0 + + return fn + + def test_try_cast_gcref_to_instance(self): + self.run("try_cast_gcref_to_instance") + + def define_get_rpy_memory_usage(self): + U = lltype.GcStruct('U', ('x1', lltype.Signed), + ('x2', lltype.Signed), + ('x3', lltype.Signed), + ('x4', lltype.Signed), + ('x5', lltype.Signed), + ('x6', lltype.Signed), + ('x7', lltype.Signed), + ('x8', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_memory_usage(gcref1) + assert 8 <= int1 <= 32 + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_memory_usage(gcref2) + assert 4*9 <= int2 <= 8*12 + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_memory_usage(gcref3) + assert 4*1001 <= int3 <= 8*1010 + return 0 + + return fn + + def test_get_rpy_memory_usage(self): + self.run("get_rpy_memory_usage") + + def define_get_rpy_type_index(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = 
lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_type_index(gcref1) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_type_index(gcref2) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_type_index(gcref3) + gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + int4 = rgc.get_rpy_type_index(gcref4) + assert int1 != int2 + assert int1 != int3 + assert int2 != int3 + assert int1 == int4 + return 0 + + return fn + + def test_get_rpy_type_index(self): + self.run("get_rpy_type_index") + + filename_dump = str(udir.join('test_dump_rpy_heap')) + def define_dump_rpy_heap(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + filename = self.filename_dump + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + # + fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666) + rgc.dump_rpy_heap(fd) + os.close(fd) + return 0 + + return fn + + def test_dump_rpy_heap(self): + self.run("dump_rpy_heap") + assert os.path.exists(self.filename_dump) + assert os.path.getsize(self.filename_dump) > 0 # minimal test + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" should_be_moving = True GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True # for snippets @@ -1055,7 +1253,7 @@ class TestHybridGC(TestGenerationalGC): gcpolicy = "hybrid" should_be_moving = True - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True def test_gc_set_max_heap_size(self): py.test.skip("not implemented") @@ -1126,6 +1324,15 @@ res = self.run("adding_a_hash") assert res == 0 +class TestMiniMarkGC(TestSemiSpaceGC): + gcpolicy = "minimark" + should_be_moving = True + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_ARRAY = True + + def test_gc_heap_stats(self): + py.test.skip("not implemented") + # ____________________________________________________________________ class TaggedPointersTest(object): @@ -1180,3 +1387,6 @@ class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): removetypeptr = True + +class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC): + removetypeptr = True Modified: pypy/branch/fast-forward/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/exceptiontransform.py (original) +++ pypy/branch/fast-forward/pypy/translator/exceptiontransform.py Mon Sep 20 13:19:51 2010 @@ -277,7 +277,9 @@ block.exits[0].target is graph.returnblock and len(block.operations) and (block.exits[0].args[0].concretetype is lltype.Void or - block.exits[0].args[0] is block.operations[-1].result)): + block.exits[0].args[0] is block.operations[-1].result) and + block.operations[-1].opname not in ('malloc', # special cases + 'malloc_nonmovable')): last_operation -= 1 lastblock = block for i in range(last_operation, -1, -1): @@ -466,6 +468,9 @@ c_flags = spaceop.args[1] c_flags.value = c_flags.value.copy() spaceop.args[1].value['zero'] = True + # NB. 
when inserting more special-cases here, keep in mind that + # you also need to list the opnames in transform_block() + # (see "special cases") if insert_zeroing_op: if normalafterblock is None: From antocuni at codespeak.net Mon Sep 20 14:05:53 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 14:05:53 +0200 (CEST) Subject: [pypy-svn] r77201 - in pypy/branch/resoperation-refactoring/pypy/jit/backend/x86: . test Message-ID: <20100920120553.ED3D1282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 14:05:52 2010 New Revision: 77201 Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py Log: fix various places in regalloc that instantiated ResOperations with the wrong number of arguments Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/regalloc.py Mon Sep 20 14:05:52 2010 @@ -798,7 +798,7 @@ else: tempbox = None other_loc = imm(ofs_items + (v.getint() << scale)) - self._call(ResOperation(rop.NEW, [v], res_v), + self._call(ResOperation(rop.NEW, [], res_v), [other_loc], [v]) loc = self.rm.make_sure_var_in_reg(v, [res_v]) assert self.loc(res_v) == eax @@ -806,7 +806,7 @@ self.rm.possibly_free_var(v) if tempbox is not None: self.rm.possibly_free_var(tempbox) - self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None), + self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None), [eax, imm(ofs_length), imm(WORD), loc]) def consider_new_array(self, op): @@ -1027,12 +1027,21 @@ def add_none_argument(fn): return lambda self, op: fn(self, op, None) +def is_comparison_or_ovf_op(opnum): + from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp + cls = opclasses[opnum] + # hack hack: in theory they are instance method, but they don't use + # any instance field, we can use a fake object + class Fake(cls): + pass + op = Fake(None) + return op.is_comparison() or op.is_ovf() + for name, value in RegAlloc.__dict__.iteritems(): if name.startswith('consider_'): name = name[len('consider_'):] num = getattr(rop, name.upper()) - if (ResOperation(num, [], None).is_comparison() - or ResOperation(num, [], None).is_ovf() + if (is_comparison_or_ovf_op(num) or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/backend/x86/test/test_regalloc.py Mon Sep 20 14:05:52 2010 @@ -9,7 +9,7 @@ from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\ - FloatConstants + FloatConstants, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -17,6 +17,11 @@ from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.backend.x86.rx86 import * +def 
test_is_comparison_or_ovf_op(): + assert not is_comparison_or_ovf_op(rop.INT_ADD) + assert is_comparison_or_ovf_op(rop.INT_ADD_OVF) + assert is_comparison_or_ovf_op(rop.INT_EQ) + CPU = getcpuclass() class MockGcDescr(GcCache): def get_funcptr_for_new(self): From antocuni at codespeak.net Mon Sep 20 14:46:27 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 14:46:27 +0200 (CEST) Subject: [pypy-svn] r77202 - in pypy/branch/resoperation-refactoring/pypy/jit/metainterp: . test Message-ID: <20100920124627.5C17C282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 14:46:25 2010 New Revision: 77202 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py Log: create common base classes for each combination of arity and {PlainResOp, ResOpWithDescr, GuardResOp}, to avoid putting mixins in all the leaves Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Mon Sep 20 14:46:25 2010 @@ -518,6 +518,18 @@ opboolresult.append(boolresult) assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) +def get_base_class(mixin, base): + try: + return get_base_class.cache[(mixin, base)] + except KeyError: + arity_name = mixin.__name__[:-2] # remove the trailing "Op" + name = arity_name + base.__name__ # something like BinaryPlainResOp + bases = (mixin, base) + cls = type(name, bases, {}) + get_base_class.cache[(mixin, base)] = cls + return cls +get_base_class.cache = {} + def create_class_for_op(name, opnum, arity, withdescr): arity2mixin = { 0: NullaryOp, @@ -540,7 +552,7 @@ return opnum cls_name = '%s_OP' % name - bases = (mixin, baseclass) + bases = (get_base_class(mixin, baseclass),) dic = {'getopnum': getopnum} return type(cls_name, bases, dic) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py Mon Sep 20 14:46:25 2010 @@ -42,6 +42,15 @@ assert issubclass(cls, rop.UnaryOp) assert cls.getopnum.im_func(None) == rop.rop.GUARD_TRUE +def test_mixins_in_common_base(): + INT_ADD = rop.opclasses[rop.rop.INT_ADD] + assert len(INT_ADD.__bases__) == 1 + BinaryPlainResOp = INT_ADD.__bases__[0] + assert BinaryPlainResOp.__name__ == 'BinaryPlainResOp' + assert BinaryPlainResOp.__bases__ == (rop.BinaryOp, rop.PlainResOp) + INT_SUB = rop.opclasses[rop.rop.INT_SUB] + assert INT_SUB.__bases__[0] is BinaryPlainResOp + def test_instantiate(): op = rop.ResOperation(rop.rop.INT_ADD, ['a', 'b'], 'c') assert op.getarglist() == ['a', 'b'] From afa at codespeak.net Mon Sep 20 15:45:03 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 20 Sep 2010 15:45:03 +0200 (CEST) Subject: [pypy-svn] r77204 - in pypy/branch/fast-forward/pypy: module/_multiprocessing module/_multiprocessing/test rlib rpython/lltypesystem Message-ID: <20100920134503.55408282BE3@codespeak.net> Author: afa Date: Mon Sep 20 15:45:01 2010 New Revision: 77204 Modified: 
pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py pypy/branch/fast-forward/pypy/rlib/rwin32.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py Log: More progress in _multiprocessing Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Mon Sep 20 15:45:01 2010 @@ -1,30 +1,157 @@ -from pypy.interpreter import gateway +from pypy.interpreter.gateway import ObjSpace, W_Root, unwrap_spec, interp2app from pypy.interpreter.function import StaticMethod from pypy.interpreter.error import wrap_windowserror from pypy.rlib import rwin32 -from pypy.rpython.lltypesystem import rffi +from pypy.rlib.rarithmetic import r_uint +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.rpython.tool import rffi_platform + +CONSTANTS = """ + PIPE_ACCESS_INBOUND PIPE_ACCESS_DUPLEX + GENERIC_READ GENERIC_WRITE OPEN_EXISTING + PIPE_TYPE_MESSAGE PIPE_READMODE_MESSAGE PIPE_WAIT + PIPE_UNLIMITED_INSTANCES + NMPWAIT_WAIT_FOREVER + ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY +""".split() + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes = ['windows.h'], + libraries = ['kernel32'], + ) + + for name in CONSTANTS: + locals()[name] = rffi_platform.ConstantInteger(name) + +class cConfig: + pass +cConfig.__dict__.update(rffi_platform.configure(CConfig)) def handle_w(space, w_handle): return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) +def w_handle(space, handle): + return space.wrap(rffi.cast(rffi.INTPTR_T, handle)) + +_CreateNamedPipe = rwin32.winexternal( + 'CreateNamedPipeA', [ + rwin32.LPCSTR, + rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, + rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, + rffi.VOIDP], + rwin32.HANDLE) + +_ConnectNamedPipe = rwin32.winexternal( + 'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL) + +_SetNamedPipeHandleState = rwin32.winexternal( + 'SetNamedPipeHandleState', [ + rwin32.HANDLE, + rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], + rwin32.BOOL) + +_CreateFile = rwin32.winexternal( + 'CreateFileA', [ + rwin32.LPCSTR, + rwin32.DWORD, rwin32.DWORD, rffi.VOIDP, + rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], + rwin32.HANDLE) def CloseHandle(space, w_handle): handle = handle_w(space, w_handle) if not rwin32.CloseHandle(handle): raise wrap_windowserror(space, rwin32.lastWindowsError()) +def GetLastError(space): + return space.wrap(rwin32.lastWindowsError()) + + at unwrap_spec(ObjSpace, str, r_uint, r_uint, r_uint, r_uint, r_uint, r_uint, W_Root) +def CreateNamedPipe(space, name, openmode, pipemode, maxinstances, + outputsize, inputsize, timeout, w_security): + security = space.int_w(w_security) + if security: + raise OperationError(space.w_NotImplementedError, + space.wrap("expected a NULL pointer")) + handle = _CreateNamedPipe( + name, openmode, pipemode, maxinstances, + outputsize, inputsize, timeout, rffi.NULL) + + if handle == rwin32.INVALID_HANDLE_VALUE: + raise wrap_windowserror(space, rwin32.lastWindowsError()) + + return w_handle(space, handle) + +def ConnectNamedPipe(space, w_handle, w_overlapped): + handle = handle_w(space, w_handle) + overlapped = space.int_w(w_overlapped) + if overlapped: + raise 
OperationError(space.w_NotImplementedError, + space.wrap("expected a NULL pointer")) + if not _ConnectNamedPipe(handle, rffi.NULL): + raise wrap_windowserror(space, rwin32.lastWindowsError()) + + at unwrap_spec(ObjSpace, W_Root, W_Root, W_Root, W_Root) +def SetNamedPipeHandleState(space, w_handle, w_pipemode, w_maxinstances, w_timeout): + handle = handle_w(space, w_handle) + state = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 3, flavor='raw') + statep = lltype.malloc(rffi.CArrayPtr(rffi.UINTP).TO, 3, flavor='raw', zero=True) + try: + if not space.is_w(w_pipemode, space.w_None): + state[0] = space.uint_w(w_pipemode) + statep[0] = rffi.ptradd(state, 0) + if not space.is_w(w_maxinstances, space.w_None): + state[1] = space.uint_w(w_maxinstances) + statep[1] = rffi.ptradd(state, 1) + if not space.is_w(w_timeout, space.w_None): + state[2] = space.uint_w(w_timeout) + statep[2] = rffi.ptradd(state, 2) + if not _SetNamedPipeHandleState(handle, statep[0], statep[1], statep[2]): + raise wrap_windowserror(space, rwin32.lastWindowsError()) + finally: + lltype.free(state, flavor='raw') + lltype.free(statep, flavor='raw') + + at unwrap_spec(ObjSpace, str, r_uint, r_uint, W_Root, r_uint, r_uint, W_Root) +def CreateFile(space, filename, access, share, w_security, + disposition, flags, w_templatefile): + security = space.int_w(w_security) + templatefile = space.int_w(w_templatefile) + if security or templatefile: + raise OperationError(space.w_NotImplementedError, + space.wrap("expected a NULL pointer")) + + handle = _CreateFile(filename, access, share, rffi.NULL, + disposition, flags, rwin32.NULL_HANDLE) + + if handle == rwin32.INVALID_HANDLE_VALUE: + raise wrap_windowserror(space, rwin32.lastWindowsError()) + + return w_handle(space, handle) + def win32_namespace(space): "NOT_RPYTHON" w_win32 = space.call_function(space.w_type, space.wrap("win32"), space.newtuple([]), space.newdict()) - try: - for name in ['CloseHandle', - ]: - function = globals()[name] - w_function = space.wrap(gateway.interp2app(function)) - w_method = space.wrap(StaticMethod(w_function)) - space.setattr(w_win32, space.wrap(name), w_method) - except Exception, e: - import pdb; pdb.set_trace() + # constants + for name in CONSTANTS: + space.setattr(w_win32, + space.wrap(name), + space.wrap(getattr(cConfig, name))) + space.setattr(w_win32, + space.wrap('NULL'), + space.newint(0)) + + # functions + for name in ['CloseHandle', 'GetLastError', 'CreateFile', + 'CreateNamedPipe', 'ConnectNamedPipe', + 'SetNamedPipeHandleState', + ]: + function = globals()[name] + w_function = space.wrap(interp2app(function)) + w_method = space.wrap(StaticMethod(w_function)) + space.setattr(w_win32, space.wrap(name), w_method) + return w_win32 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py Mon Sep 20 15:45:01 2010 @@ -9,5 +9,33 @@ cls.space = gettestobjspace(usemodules=('_multiprocessing',)) def test_CloseHandle(self): - import _multiprocessing - raises(WindowsError, _multiprocessing.win32.CloseHandle, -1) + from _multiprocessing import win32 + raises(WindowsError, win32.CloseHandle, -1) + + def test_pipe(self): + from _multiprocessing import win32 + import os + address = r'\\.\pipe\pypy-test-%s' % (os.getpid()) + openmode = win32.PIPE_ACCESS_INBOUND + access = win32.GENERIC_WRITE + 
obsize, ibsize = 0, 8192 + readhandle = win32.CreateNamedPipe( + address, openmode, + win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | + win32.PIPE_WAIT, + 1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL + ) + writehandle = win32.CreateFile( + address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL + ) + win32.SetNamedPipeHandleState( + writehandle, win32.PIPE_READMODE_MESSAGE, None, None) + + try: + win32.ConnectNamedPipe(readhandle, win32.NULL) + except WindowsError, e: + if e.args[0] != win32.ERROR_PIPE_CONNECTED: + raise + + win32.CloseHandle(readhandle) + win32.CloseHandle(writehandle) Modified: pypy/branch/fast-forward/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rwin32.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rwin32.py Mon Sep 20 15:45:01 2010 @@ -41,7 +41,7 @@ LPCSTR = rffi_platform.SimpleType("LPCSTR", rffi.CCHARP) LPWSTR = rffi_platform.SimpleType("LPWSTR", rffi.CWCHARP) LPCWSTR = rffi_platform.SimpleType("LPCWSTR", rffi.CWCHARP) - LPDWORD = rffi_platform.SimpleType("LPDWORD", rffi.INTP) + LPDWORD = rffi_platform.SimpleType("LPDWORD", rffi.UINTP) SIZE_T = rffi_platform.SimpleType("SIZE_T", rffi.SIZE_T) ULONG_PTR = rffi_platform.SimpleType("ULONG_PTR", rffi.ULONG) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py Mon Sep 20 15:45:01 2010 @@ -551,6 +551,7 @@ # void * - for now, represented as char * VOIDP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True})) VOIDP_real = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True})) +NULL = lltype.nullptr(VOIDP.TO) # void ** VOIDPP = CArrayPtr(VOIDP) From afa at codespeak.net Mon Sep 20 16:38:41 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 20 Sep 2010 16:38:41 +0200 (CEST) Subject: [pypy-svn] r77207 - pypy/branch/fast-forward/pypy/module/_winreg Message-ID: <20100920143841.AD1B1282BE3@codespeak.net> Author: afa Date: Mon Sep 20 16:38:40 2010 New Revision: 77207 Modified: pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py Log: Fix translation Modified: pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py (original) +++ pypy/branch/fast-forward/pypy/module/_winreg/interp_winreg.py Mon Sep 20 16:38:40 2010 @@ -5,6 +5,7 @@ from pypy.interpreter.error import OperationError from pypy.rpython.lltypesystem import rffi, lltype from pypy.rlib import rwinreg, rwin32 +from pypy.rlib.rarithmetic import r_uint def raiseWindowsError(space, errcode, context): message = rwin32.FormatError(errcode) @@ -596,7 +597,7 @@ try: retValueSize = lltype.malloc(rwin32.LPDWORD.TO, 1, flavor='raw') try: - retValueSize[0] = 256 # includes NULL terminator + retValueSize[0] = r_uint(256) # includes NULL terminator ret = rwinreg.RegEnumKeyEx(hkey, index, buf, retValueSize, null_dword, None, null_dword, lltype.nullptr(rwin32.PFILETIME.TO)) From arigo at codespeak.net Mon Sep 20 17:14:39 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 17:14:39 +0200 (CEST) Subject: [pypy-svn] r77208 - pypy/branch/smaller-writebarrier Message-ID: 
<20100920151439.0190836C22E@codespeak.net> Author: arigo Date: Mon Sep 20 17:14:38 2010 New Revision: 77208 Added: pypy/branch/smaller-writebarrier/ - copied from r77207, pypy/trunk/ Log: A branch in which to reduce the number of arguments and the complexity of the code in the GC write barriers. From arigo at codespeak.net Mon Sep 20 17:40:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 17:40:04 +0200 (CEST) Subject: [pypy-svn] r77209 - in pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc: . test Message-ID: <20100920154004.92CCA282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 17:40:01 2010 New Revision: 77209 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/generation.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py Log: Reduce the arity and complexity of remember_young_pointer() methods. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/generation.py Mon Sep 20 17:40:01 2010 @@ -321,7 +321,7 @@ addr = pointer.address[0] newaddr = self.copy(addr) pointer.address[0] = newaddr - self.write_into_last_generation_obj(obj, newaddr) + self.write_into_last_generation_obj(obj) # ____________________________________________________________ # Implementation of nursery-only collections @@ -452,11 +452,12 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) def _setup_wb(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -464,33 +465,24 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). 
- def remember_young_pointer(addr_struct, addr): + def remember_young_pointer(addr_struct): #llop.debug_print(lltype.Void, "\tremember_young_pointer", # addr_struct, "<-", addr) - ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - self.write_into_last_generation_obj(addr_struct, addr) + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + self.old_objects_pointing_to_young.append(addr_struct) + self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS + self.write_into_last_generation_obj(addr_struct) remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer - def write_into_last_generation_obj(self, addr_struct, addr): + def write_into_last_generation_obj(self, addr_struct): objhdr = self.header(addr_struct) if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - if not self.is_last_generation(addr): - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.last_generation_root_objects.append(addr_struct) + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.last_generation_root_objects.append(addr_struct) + write_into_last_generation_obj._always_inline_ = True def assume_young_pointers(self, addr_struct): objhdr = self.header(addr_struct) Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Mon Sep 20 17:40:01 2010 @@ -675,19 +675,19 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) - def write_barrier_from_array(self, newvalue, addr_array, index): + def write_barrier_from_array(self, addr_array, index): if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS: if self.card_page_indices > 0: # <- constant-folded - self.remember_young_pointer_from_array(addr_array, index, - newvalue) + self.remember_young_pointer_from_array(addr_array, index) else: - self.remember_young_pointer(addr_array, newvalue) + self.remember_young_pointer(addr_array) def _init_writebarrier_logic(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -695,30 +695,22 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). 
- def remember_young_pointer(addr_struct, addr): - # 'addr_struct' is the address of the object in which we write; - # 'addr' is the address that we write in 'addr_struct'. - ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # Core logic: if the 'addr' is in the nursery, then we need + def remember_young_pointer(addr_struct): + # 'addr_struct' is the address of the object in which we write. + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + # + # We assume that what we are writing is a pointer to the nursery + # (and don't care for the fact that this new pointer may not + # actually point to the nursery, which seems ok). What we need is # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object # to the list 'old_objects_pointing_to_young'. We know that # 'addr_struct' cannot be in the nursery, because nursery objects # never have the flag GCFLAG_NO_YOUNG_PTRS to start with. + self.old_objects_pointing_to_young.append(addr_struct) objhdr = self.header(addr_struct) - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return + objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS # # Second part: if 'addr_struct' is actually a prebuilt GC # object and it's the first time we see a write to it, we @@ -737,17 +729,16 @@ def _init_writebarrier_with_card_marker(self): - def remember_young_pointer_from_array(addr_array, index, addr): + def remember_young_pointer_from_array(addr_array, index): # 'addr_array' is the address of the object in which we write, # which must have an array part; 'index' is the index of the - # item that is (or contains) the pointer that we write; - # 'addr' is the address that we write in the array. + # item that is (or contains) the pointer that we write. objhdr = self.header(addr_array) if objhdr.tid & GCFLAG_HAS_CARDS == 0: # # no cards, use default logic. The 'nocard_logic()' is just # 'remember_young_pointer()', but forced to be inlined here. - nocard_logic(addr_array, addr) + nocard_logic(addr_array) return # # 'addr_array' is a raw_malloc'ed array with card markers @@ -764,22 +755,13 @@ if byte & bitmask: return # - # As in remember_young_pointer, check if 'addr' is a valid - # pointer, in case it can be a tagged integer - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # If the 'addr' is in the nursery, then we need to set the flag. - # Note that the following check is done after the bit check - # above, because it is expected that the "bit already set" - # situation is the most common. - if self.is_in_nursery(addr): - addr_byte.char[0] = chr(byte | bitmask) - # - if objhdr.tid & GCFLAG_CARDS_SET == 0: - self.old_objects_with_cards_set.append(addr_array) - objhdr.tid |= GCFLAG_CARDS_SET + # We set the flag (even if the newly written address does not + # actually point to the nursery -- like remember_young_pointer()). 
+ addr_byte.char[0] = chr(byte | bitmask) + # + if objhdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET nocard_logic = func_with_new_name(self.remember_young_pointer, 'remember_young_pointer_nocard') Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py Mon Sep 20 17:40:01 2010 @@ -86,19 +86,17 @@ def write(self, p, fieldname, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) setattr(p, fieldname, newvalue) def writearray(self, p, index, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) if hasattr(self.gc, 'write_barrier_from_array'): - self.gc.write_barrier_from_array(newaddr, addr_struct, index) + self.gc.write_barrier_from_array(addr_struct, index) else: - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): From arigo at codespeak.net Mon Sep 20 17:40:46 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 17:40:46 +0200 (CEST) Subject: [pypy-svn] r77210 - pypy/branch/smaller-writebarrier/pypy/rpython/memory Message-ID: <20100920154046.BD746282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 17:40:45 2010 New Revision: 77210 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gcwrapper.py Log: Fix for test_gc. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gcwrapper.py Mon Sep 20 17:40:45 2010 @@ -94,7 +94,6 @@ assert (type(index) is int # <- fast path or lltype.typeOf(index) == lltype.Signed) self.gc.write_barrier_from_array( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer), index) wb = False @@ -102,7 +101,6 @@ # if wb: self.gc.write_barrier( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) From arigo at codespeak.net Mon Sep 20 17:44:21 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 17:44:21 +0200 (CEST) Subject: [pypy-svn] r77211 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100920154421.0E799282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 17:44:20 2010 New Revision: 77211 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/base.py Log: Fix. 
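For reference, after the changes above both barrier calls take only the destination object: write_barrier(addr_struct) and write_barrier_from_array(addr_array, index); the value being stored is no longer passed.  A minimal sketch of a caller in the style of the gcwrapper code above, assuming a 'gc' object that provides these two methods (the helper name and the separate 'has_write_barrier_from_array' flag are only illustrative):

from pypy.rpython.lltypesystem import llmemory

def emit_write_barrier(gc, has_write_barrier_from_array,
                       toplevelcontainer, index=None):
    # Only the destination object is passed; the new value is not needed
    # by the barrier any more.
    addr = llmemory.cast_ptr_to_adr(toplevelcontainer)
    if index is not None and has_write_barrier_from_array:
        gc.write_barrier_from_array(addr, index)   # store into an array item
    else:
        gc.write_barrier(addr)                     # any other pointer store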
Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/base.py Mon Sep 20 17:44:20 2010 @@ -76,7 +76,7 @@ def set_root_walker(self, root_walker): self.root_walker = root_walker - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): pass def statistics(self, index): From antocuni at codespeak.net Mon Sep 20 17:46:16 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 20 Sep 2010 17:46:16 +0200 (CEST) Subject: [pypy-svn] r77212 - pypy/branch/resoperation-refactoring/pypy/jit/metainterp Message-ID: <20100920154616.A6000282BE3@codespeak.net> Author: antocuni Date: Mon Sep 20 17:46:15 2010 New Revision: 77212 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Log: (cfbolz, antocuni) very tentative checkin: try to make the creation of ResOperation faster Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Mon Sep 20 17:46:15 2010 @@ -8,7 +8,7 @@ from pypy.tool.uid import uid from pypy.conftest import option -from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.metainterp.resoperation import ResOperation, ResOperation_fast, rop from pypy.jit.codewriter import heaptracker # ____________________________________________________________ @@ -834,6 +834,12 @@ self.operations.append(op) return op + def record_fast(self, opnum, resbox, descr, *args): + #op = ResOperation(opnum, argboxes, resbox, descr) + op = ResOperation_fast(opnum, resbox, descr, *args) + self.operations.append(op) + return op + def substitute_operation(self, position, opnum, argboxes, descr=None): resbox = self.operations[position].result op = ResOperation(opnum, argboxes, resbox, descr) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Mon Sep 20 17:46:15 2010 @@ -1444,8 +1444,8 @@ if rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST: return self._record_helper_pure(opnum, resbox, descr, *argboxes) else: - return self._record_helper_nonpure_varargs(opnum, resbox, descr, - list(argboxes)) + return self._record_helper_nonpure(opnum, resbox, descr, + *argboxes) @specialize.arg(1) def execute_and_record_varargs(self, opnum, argboxes, descr=None): @@ -1470,7 +1470,7 @@ return resbox else: resbox = resbox.nonconstbox() # ensure it is a Box - return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) + return self._record_helper_nonpure(opnum, resbox, descr, *argboxes) def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) @@ -1490,6 +1490,15 @@ self.attach_debug_info(op) return resbox + def _record_helper_nonpure(self, opnum, resbox, descr, *argboxes): + 
assert resbox is None or isinstance(resbox, Box) + # record the operation + profiler = self.staticdata.profiler + profiler.count_ops(opnum, RECORDED_OPS) + op = self.history.record_fast(opnum, resbox, descr, *argboxes) + self.attach_debug_info(op) + return resbox + def attach_debug_info(self, op): if (not we_are_translated() and op is not None and getattr(self, 'framestack', None)): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Mon Sep 20 17:46:15 2010 @@ -1,6 +1,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized + def ResOperation(opnum, args, result, descr=None): cls = opclasses[opnum] op = cls(result) @@ -11,6 +12,17 @@ return op +def ResOperation_fast(opnum, result, descr, *args): + cls = opclasses[opnum] + op = cls(result) + op.initarglist_fast(*args) + if descr is not None: + assert isinstance(op, ResOpWithDescr) + op.setdescr(descr) + return op + + + class AbstractResOp(object): """The central ResOperation class, representing one operation.""" @@ -34,6 +46,10 @@ "This is supposed to be called only just after the ResOp has been created" raise NotImplementedError + def initarglist_fast(self, *args): + "This is supposed to be called only just after the ResOp has been created" + raise NotImplementedError + def getarglist(self): raise NotImplementedError @@ -214,6 +230,9 @@ def initarglist(self, args): assert len(args) == 0 + def initarglist_fast(self, *args): + assert len(args) == 0 + def getarglist(self): return [] @@ -235,6 +254,10 @@ assert len(args) == 1 self._arg0, = args + def initarglist_fast(self, *args): + assert len(args) == 1 + self._arg0, = args + def getarglist(self): return [self._arg0] @@ -263,6 +286,10 @@ assert len(args) == 2 self._arg0, self._arg1 = args + def initarglist_fast(self, *args): + assert len(args) == 2 + self._arg0, self._arg1 = args + def getarglist(self): return [self._arg0, self._arg1, self._arg2] @@ -299,6 +326,10 @@ assert len(args) == 3 self._arg0, self._arg1, self._arg2 = args + def initarglist_fast(self, *args): + assert len(args) == 3 + self._arg0, self._arg1, self._arg2 = args + def getarglist(self): return [self._arg0, self._arg1, self._arg2] @@ -332,6 +363,9 @@ def initarglist(self, args): self._args = args + def initarglist_fast(self, *args): + self._args = list(args) + def getarglist(self): return self._args From arigo at codespeak.net Mon Sep 20 18:00:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 18:00:15 +0200 (CEST) Subject: [pypy-svn] r77213 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100920160015.8A2CC282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 18:00:14 2010 New Revision: 77213 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/ (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/intutils.py (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py (props changed) Log: fixeol From arigo at codespeak.net Mon Sep 20 18:00:44 2010 From: arigo at 
codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 18:00:44 +0200 (CEST) Subject: [pypy-svn] r77214 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform Message-ID: <20100920160044.BEC6F282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 18:00:43 2010 New Revision: 77214 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py Log: Fix write_barrier when translated. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py Mon Sep 20 18:00:43 2010 @@ -426,7 +426,6 @@ if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, - annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None, inline=True) @@ -435,15 +434,13 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress(), - annmodel.SomeAddress()], + [annmodel.SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, [s_gc, annmodel.SomeAddress(), - annmodel.SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -455,8 +452,7 @@ self.write_barrier_from_array_failing_case_ptr = \ getfn(func, [annmodel.SomeAddress(), - annmodel.SomeInteger(), - annmodel.SomeAddress()], + annmodel.SomeInteger()], annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], @@ -1023,8 +1019,6 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], - resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) if (self.write_barrier_from_array_ptr is not None and @@ -1034,14 +1028,12 @@ assert v_index.concretetype == lltype.Signed hop.genop("direct_call", [self.write_barrier_from_array_ptr, self.c_const_gc, - v_newvalue, v_structaddr, v_index]) else: self.write_barrier_calls += 1 hop.genop("direct_call", [self.write_barrier_ptr, self.c_const_gc, - v_newvalue, v_structaddr]) hop.rename('bare_' + opname) From arigo at codespeak.net Mon Sep 20 18:18:03 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Sep 2010 18:18:03 +0200 (CEST) Subject: [pypy-svn] r77215 - in pypy/branch/smaller-writebarrier/pypy/jit: backend/llgraph backend/llsupport backend/llsupport/test backend/test backend/x86 metainterp Message-ID: <20100920161803.DF530282BE3@codespeak.net> Author: arigo Date: Mon Sep 20 18:18:01 2010 New Revision: 77215 Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/llgraph/llimpl.py pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/gc.py pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/smaller-writebarrier/pypy/jit/backend/test/runner_test.py pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/assembler.py pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/regalloc.py pypy/branch/smaller-writebarrier/pypy/jit/metainterp/resoperation.py Log: Fix write_barrier calls in the JIT. 
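This commit mirrors the new calling convention inside the JIT: cond_call_gc_wb now carries a single argument, the object whose field or item is being written. A sketch of the operation pair that rewrite_assembler emits after this patch, based on the _gen_write_barrier diff below (the variable names are illustrative):

    # v_base is the Box holding the object, v_value the Box being stored;
    # the write barrier no longer needs to see v_value at all.
    ops = [
        ResOperation(rop.COND_CALL_GC_WB, [v_base], None,
                     descr=write_barrier_descr),
        ResOperation(rop.SETFIELD_RAW, [v_base, v_value], None,
                     descr=fielddescr),
    ]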
Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/llgraph/llimpl.py Mon Sep 20 18:18:01 2010 @@ -129,7 +129,7 @@ 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), 'call_assembler' : (('varargs',), 'intorptr'), - 'cond_call_gc_wb' : (('ptr', 'ptr'), None), + 'cond_call_gc_wb' : (('ptr',), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), 'guard_true' : (('bool',), None), @@ -810,7 +810,7 @@ FLOAT: 0.0} return d[calldescr.typeinfo] - def op_cond_call_gc_wb(self, descr, a, b): + def op_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") def op_oosend(self, descr, obj, *args): Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/gc.py Mon Sep 20 18:18:01 2010 @@ -394,7 +394,7 @@ self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( [lltype.Signed, lltype.Signed], llmemory.GCREF)) self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address, llmemory.Address], lltype.Void)) + [llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) # def malloc_array(itemsize, tid, num_elem): @@ -540,8 +540,7 @@ # the GC, and call it immediately llop1 = self.llop1 funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR) - funcptr(llmemory.cast_ptr_to_adr(gcref_struct), - llmemory.cast_ptr_to_adr(gcref_newptr)) + funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) def rewrite_assembler(self, cpu, operations): # Perform two kinds of rewrites in parallel: @@ -580,7 +579,7 @@ v = op.args[1] if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) + self._gen_write_barrier(newops, op.args[0]) op = ResOperation(rop.SETFIELD_RAW, op.args, None, descr=op.descr) # ---------- write barrier for SETARRAYITEM_GC ---------- @@ -588,7 +587,9 @@ v = op.args[2] if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) + # XXX detect when we should produce a + # write_barrier_from_array + self._gen_write_barrier(newops, op.args[0]) op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None, descr=op.descr) # ---------- @@ -596,8 +597,8 @@ del operations[:] operations.extend(newops) - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] + def _gen_write_barrier(self, newops, v_base): + args = [v_base] newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=self.write_barrier_descr)) Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/llsupport/test/test_gc.py Mon Sep 20 18:18:01 2010 @@ -141,8 +141,8 @@ repr(offset_to_length), p)) return p - def _write_barrier_failing_case(self, adr_struct, adr_newptr): - self.record.append(('barrier', adr_struct, adr_newptr)) + def 
_write_barrier_failing_case(self, adr_struct): + self.record.append(('barrier', adr_struct)) def get_write_barrier_failing_case(self, FPTRTYPE): return llhelper(FPTRTYPE, self._write_barrier_failing_case) @@ -238,7 +238,6 @@ s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) r_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, r) s_adr = llmemory.cast_ptr_to_adr(s) - r_adr = llmemory.cast_ptr_to_adr(r) # s_hdr.tid &= ~gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) @@ -246,7 +245,7 @@ # s_hdr.tid |= gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) - assert self.llop1.record == [('barrier', s_adr, r_adr)] + assert self.llop1.record == [('barrier', s_adr)] def test_gen_write_barrier(self): gc_ll_descr = self.gc_ll_descr @@ -254,13 +253,12 @@ # newops = [] v_base = BoxPtr() - v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + gc_ll_descr._gen_write_barrier(newops, v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].opnum == rop.COND_CALL_GC_WB assert newops[0].args[0] == v_base - assert newops[0].args[1] == v_value + assert len(newops[0].args) == 1 assert newops[0].result is None wbdescr = newops[0].descr assert isinstance(wbdescr.jit_wb_if_flag, int) @@ -358,7 +356,7 @@ # assert operations[0].opnum == rop.COND_CALL_GC_WB assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert len(operations[0].args) == 1 assert operations[0].result is None # assert operations[1].opnum == rop.SETFIELD_RAW @@ -381,7 +379,7 @@ # assert operations[0].opnum == rop.COND_CALL_GC_WB assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert len(operations[0].args) == 1 assert operations[0].result is None # assert operations[1].opnum == rop.SETARRAYITEM_RAW Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/test/runner_test.py Mon Sep 20 18:18:01 2010 @@ -1404,8 +1404,8 @@ assert not excvalue def test_cond_call_gc_wb(self): - def func_void(a, b): - record.append((a, b)) + def func_void(a): + record.append(a) record = [] # S = lltype.GcStruct('S', ('tid', lltype.Signed)) @@ -1430,10 +1430,10 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstInt(-2121)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: - assert record == [(s, -2121)] + assert record == [s] else: assert record == [] Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/assembler.py Mon Sep 20 18:18:01 2010 @@ -1768,6 +1768,7 @@ jz_location = self.mc.get_relative_pos() # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. + # XXX improve a bit, particularly for IS_X86_64. 
for i in range(len(arglocs)-1, -1, -1): loc = arglocs[i] if isinstance(loc, RegLoc): @@ -1780,12 +1781,11 @@ self.mc.PUSH_i32(loc.getint()) if IS_X86_64: - # We clobber these registers to pass the arguments, but that's + # We clobber this register to pass the arguments, but that's # okay, because consider_cond_call_gc_wb makes sure that any # caller-save registers with values in them are present in arglocs, # so they are saved on the stack above and restored below self.mc.MOV_rs(edi.value, 0) - self.mc.MOV_rs(esi.value, 8) # misaligned stack in the call, but it's ok because the write barrier # is not going to call anything more. Also, this assumes that the Modified: pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/backend/x86/regalloc.py Mon Sep 20 18:18:01 2010 @@ -682,13 +682,9 @@ def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args) - # ^^^ we force loc_newvalue in a reg (unless it's a Const), - # because it will be needed anyway by the following setfield_gc. - # It avoids loading it twice from the memory. loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, imm_fine=False) - arglocs = [loc_base, loc_newvalue] + arglocs = [loc_base] # add eax, ecx and edx as extra "arguments" to ensure they are # saved and restored. Fish in self.rm to know which of these # registers really need to be saved (a bit of a hack). Moreover, Modified: pypy/branch/smaller-writebarrier/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/smaller-writebarrier/pypy/jit/metainterp/resoperation.py Mon Sep 20 18:18:01 2010 @@ -219,7 +219,7 @@ 'UNICODESETITEM/3', 'NEWUNICODE/1', #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) + 'COND_CALL_GC_WB', # [objptr] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend From afa at codespeak.net Mon Sep 20 19:56:30 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Mon, 20 Sep 2010 19:56:30 +0200 (CEST) Subject: [pypy-svn] r77216 - in pypy/branch/fast-forward/pypy/module/_multiprocessing: . test Message-ID: <20100920175630.D11F3282BE3@codespeak.net> Author: afa Date: Mon Sep 20 19:56:28 2010 New Revision: 77216 Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Log: Start implementing _multiprocessing connection objects. Does not work so far. 
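The target interface here is CPython's _multiprocessing Connection objects. As a rough sketch of the intended application-level usage, this is what the os.pipe()-based test added later in this thread (r77223) boils down to; it does not work yet at this revision, as the log says:

    import os
    import _multiprocessing

    fd_read, fd_write = os.pipe()
    r = _multiprocessing.Connection(fd_read, writable=False)
    w = _multiprocessing.Connection(fd_write, readable=False)

    obj = [1, 2.0, "hello"]
    w.send(obj)              # pickles obj and writes it to the pipe
    assert r.recv() == obj   # reads it back and unpickles it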
Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py Mon Sep 20 19:56:28 2010 @@ -5,6 +5,7 @@ interpleveldefs = { 'Connection' : 'interp_connection.W_SocketConnection', + 'PipeConnection' : 'interp_connection.W_PipeConnection', } appleveldefs = { Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Mon Sep 20 19:56:28 2010 @@ -1,10 +1,14 @@ -from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec +import sys INVALID_HANDLE_VALUE = -1 READABLE = 1 WRITABLE = 2 +PY_SSIZE_T_MAX = sys.maxint +PY_SSIZE_T_MIN = -sys.maxint - 1 class W_BaseConnection(Wrappable): def __init__(self, handle, flags): @@ -32,12 +36,110 @@ def writable_get(space, self): return space.w_bool(self.flags & WRITABLE) -class W_SocketConnection(W_BaseConnection): - pass + @unwrap_spec('self', ObjSpace, 'bufferstr', 'index', 'index') + def send_bytes(self, space, buffer, offset=0, size=PY_SSIZE_T_MIN): + length = len(buffer) + self._check_writable() + if offset < 0: + raise OperationError(space.w_ValueError, + space.wrap("offset is negative")) + if length < offset: + raise OperationError(space.w_ValueError, + space.wrap("buffer length < offset")) + + if size == PY_SSIZE_T_MIN: + size = length - offset + elif size < 0: + raise OperationError(space.w_ValueError, + space.wrap("size is negative")) + elif offset + size > length: + raise OperationError(space.w_ValueError, + space.wrap("buffer length > offset + size")) + + res = self.do_send_string(buffer, offset, size) + if res < 0: + raise mp_error(res) + + @unwrap_spec('self', ObjSpace, 'index') + def recv_bytes(self, space, maxlength=sys.maxint): + self._check_readable() + if maxlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("maxlength < 0")) + + try: + res, newbuf = self.do_recv_string(maxlength) + + if res < 0: + if res == MP_BAD_MESSAGE_LENGTH: + self.flags &= ~READABLE + if self.flags == 0: + self.close() + raise mp_error(res) + + if newbuf is not None: + return space.wrap(rffi.charp2str(newbuf, res)) + else: + return space.wrap(rffi.charp2str(self.buffer, res)) + return result + finally: + if newbuf is not None: + rffi.free_charp(newbuf) + + @unwrap_spec('self', ObjSpace, W_Root, 'index') + def recv_bytes_into(self, space, w_buffer, offset=0): + rwbuffer = space.rwbuffer_w(w_buffer) + length = rwbuffer.getlength() + + try: + res, newbuf = self.do_recv_string(length - offset) + + if res < 0: + if res == MP_BAD_MESSAGE_LENGTH: + self.flags &= ~READABLE + if self.flags == 0: + self.close() + raise mp_error(res) + + if res > length - offset: + raise OperationError(BufferTooShort) + if newbuf is not None: + rwbuffer.setslice(offset, newbuf) + else: + rwbuffer.setslice(offset, self.buffer) + finally: + if newbuf is not None: + rffi.free_charp(newbuf) -W_SocketConnection.typedef = TypeDef( - 'Connection', + return space.wrap(res) + + +base_typedef = 
TypeDef( + 'BaseConnection', closed = GetSetProperty(W_BaseConnection.closed_get), readable = GetSetProperty(W_BaseConnection.readable_get), writable = GetSetProperty(W_BaseConnection.writable_get), + + send_bytes = interp2app(W_BaseConnection.send_bytes), + recv_bytes = interp2app(W_BaseConnection.recv_bytes), + recv_bytes_into = interp2app(W_BaseConnection.recv_bytes_into), + ## send = interp2app(W_BaseConnection.send), + ## recv = interp2app(W_BaseConnection.recv), + ## poll = interp2app(W_BaseConnection.poll), + ## fileno = interp2app(W_BaseConnection.fileno), + ## close = interp2app(W_BaseConnection.close), + ) + +class W_SocketConnection(W_BaseConnection): + pass + +W_SocketConnection.typedef = TypeDef( + 'Connection', base_typedef +) + +class W_PipeConnection(W_BaseConnection): + pass + +W_PipeConnection.typedef = TypeDef( + 'PipeConnection', base_typedef ) Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Mon Sep 20 19:56:28 2010 @@ -58,6 +58,9 @@ rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], rwin32.HANDLE) +_ExitProcess = rwin32.winexternal( + 'ExitProcess', [rffi.UINT], lltype.Void) + def CloseHandle(space, w_handle): handle = handle_w(space, w_handle) if not rwin32.CloseHandle(handle): @@ -129,6 +132,10 @@ return w_handle(space, handle) + at unwrap_spec(ObjSpace, r_uint) +def ExitProcess(space, code): + _ExitProcess(code) + def win32_namespace(space): "NOT_RPYTHON" w_win32 = space.call_function(space.w_type, @@ -148,6 +155,7 @@ for name in ['CloseHandle', 'GetLastError', 'CreateFile', 'CreateNamedPipe', 'ConnectNamedPipe', 'SetNamedPipeHandleState', + 'ExitProcess', ]: function = globals()[name] w_function = space.wrap(interp2app(function)) Added: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Mon Sep 20 19:56:28 2010 @@ -0,0 +1,31 @@ +import py +import sys +from pypy.conftest import gettestobjspace + +class TestConnection: + def test_simple(self): + from pypy.module._multiprocessing import interp_connection + +class AppTestConnection: + def setup_class(cls): + if sys.platform != "win32": + py.test.skip("win32 only") + cls.space = gettestobjspace(usemodules=('thread', '_multiprocessing', + #'_rawffi', # on win32 + )) + if sys.platform == "win32": + space = cls.space + space.setitem(space.sys.get('modules'), + space.wrap('msvcrt'), space.sys) + space.setitem(space.sys.get('modules'), + space.wrap('_subprocess'), space.sys) + + def test_pipe_connection(self): + import multiprocessing + obj = [1, 2.0, "hello"] + whandle, rhandle = multiprocessing.Pipe() + whandle.send(obj) + obj2 = rhandle.recv() + assert obj == obj2 + + From afa at codespeak.net Tue Sep 21 00:10:21 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 00:10:21 +0200 (CEST) Subject: [pypy-svn] r77217 - in pypy/branch/fast-forward/pypy/module/_multiprocessing: . 
test Message-ID: <20100920221021.D6584282BE9@codespeak.net> Author: afa Date: Tue Sep 21 00:10:19 2010 New Revision: 77217 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py Log: _multiprocessing.PipeConnection: Enough progress for the simple case to work Does not yet translate! Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Tue Sep 21 00:10:19 2010 @@ -1,9 +1,9 @@ from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.rpython.lltypesystem import rffi, lltype import sys -INVALID_HANDLE_VALUE = -1 READABLE = 1 WRITABLE = 2 @@ -11,10 +11,19 @@ PY_SSIZE_T_MIN = -sys.maxint - 1 class W_BaseConnection(Wrappable): + BUFFER_SIZE = 1024 + def __init__(self, handle, flags): self.handle = handle self.flags = flags + self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE, + flavor='raw') + + def __del__(self): + lltype.free(self.buffer, flavor='raw') + self.do_close() + def descr_repr(self, space): conn_type = ["read-only", "write-only", "read-write"][self.flags] @@ -22,24 +31,28 @@ conn_type, space.type(self).getname(space, '?'), self.handle)) def close(self): - if self.handle != INVALID_HANDLE_VALUE: - self.do_close() - self.handle = INVALID_HANDLE_VALUE - - def __del__(self): - self.close() + self.do_close() def closed_get(space, self): - return space.w_bool(self.handle == INVALID_HANDLE_VALUE) + return space.w_bool(not self.is_valid()) def readable_get(space, self): return space.w_bool(self.flags & READABLE) def writable_get(space, self): return space.w_bool(self.flags & WRITABLE) + def _check_readable(self, space): + if not self.flags & READABLE: + raise OperationError(space.w_IOError, + space.wrap("connection is write-only")) + def _check_writable(self, space): + if not self.flags & WRITABLE: + raise OperationError(space.w_IOError, + space.wrap("connection is read-only")) + @unwrap_spec('self', ObjSpace, 'bufferstr', 'index', 'index') def send_bytes(self, space, buffer, offset=0, size=PY_SSIZE_T_MIN): length = len(buffer) - self._check_writable() + self._check_writable(space) if offset < 0: raise OperationError(space.w_ValueError, space.wrap("offset is negative")) @@ -56,20 +69,17 @@ raise OperationError(space.w_ValueError, space.wrap("buffer length > offset + size")) - res = self.do_send_string(buffer, offset, size) - if res < 0: - raise mp_error(res) + self.do_send_string(space, buffer, offset, size) @unwrap_spec('self', ObjSpace, 'index') - def recv_bytes(self, space, maxlength=sys.maxint): - self._check_readable() + def recv_bytes(self, space, maxlength=PY_SSIZE_T_MAX): + self._check_readable(space) if maxlength < 0: raise OperationError(space.w_ValueError, space.wrap("maxlength < 0")) + res, newbuf = self.do_recv_string(space, maxlength) try: - res, newbuf = self.do_recv_string(maxlength) - if res < 0: if res == MP_BAD_MESSAGE_LENGTH: self.flags &= ~READABLE @@ -77,13 +87,12 @@ self.close() raise 
mp_error(res) - if newbuf is not None: - return space.wrap(rffi.charp2str(newbuf, res)) + if newbuf: + return space.wrap(rffi.charpsize2str(newbuf, res)) else: - return space.wrap(rffi.charp2str(self.buffer, res)) - return result + return space.wrap(rffi.charpsize2str(self.buffer, res)) finally: - if newbuf is not None: + if newbuf: rffi.free_charp(newbuf) @unwrap_spec('self', ObjSpace, W_Root, 'index') @@ -91,9 +100,8 @@ rwbuffer = space.rwbuffer_w(w_buffer) length = rwbuffer.getlength() + res, newbuf = self.do_recv_string(space, length - offset) try: - res, newbuf = self.do_recv_string(length - offset) - if res < 0: if res == MP_BAD_MESSAGE_LENGTH: self.flags &= ~READABLE @@ -103,16 +111,60 @@ if res > length - offset: raise OperationError(BufferTooShort) - if newbuf is not None: - rwbuffer.setslice(offset, newbuf) + if newbuf: + rwbuffer.setslice(offset, rffi.charpsize2str(newbuf, res)) else: - rwbuffer.setslice(offset, self.buffer) + rwbuffer.setslice(offset, rffi.charpsize2str(self.buffer, res)) finally: - if newbuf is not None: + if newbuf: rffi.free_charp(newbuf) return space.wrap(res) + @unwrap_spec('self', ObjSpace, W_Root) + def send(self, space, w_obj): + self._check_writable(space) + + w_builtins = space.getbuiltinmodule('__builtin__') + w_picklemodule = space.call_method( + w_builtins, '__import__', space.wrap("pickle")) + w_protocol = space.getattr( + w_picklemodule, space.wrap("HIGHEST_PROTOCOL")) + w_pickled = space.call_method( + w_picklemodule, "dumps", w_obj, w_protocol) + + buffer = space.bufferstr_w(w_pickled) + self.do_send_string(space, buffer, 0, len(buffer)) + + @unwrap_spec('self', ObjSpace) + def recv(self, space): + self._check_readable(space) + + res, newbuf = self.do_recv_string(space, PY_SSIZE_T_MAX) + try: + if res < 0: + if res == MP_BAD_MESSAGE_LENGTH: + self.flags &= ~READABLE + if self.flags == 0: + self.close() + raise mp_error(res) + if newbuf: + w_received = space.wrap(rffi.charpsize2str(newbuf, res)) + else: + w_received = space.wrap(rffi.charpsize2str(self.buffer, res)) + finally: + if newbuf: + rffi.free_charp(newbuf) + + w_builtins = space.getbuiltinmodule('__builtin__') + w_picklemodule = space.call_method( + w_builtins, '__import__', space.wrap("pickle")) + w_unpickled = space.call_method( + w_picklemodule, "loads", w_received) + + return w_unpickled + + base_typedef = TypeDef( 'BaseConnection', @@ -123,23 +175,112 @@ send_bytes = interp2app(W_BaseConnection.send_bytes), recv_bytes = interp2app(W_BaseConnection.recv_bytes), recv_bytes_into = interp2app(W_BaseConnection.recv_bytes_into), - ## send = interp2app(W_BaseConnection.send), - ## recv = interp2app(W_BaseConnection.recv), + send = interp2app(W_BaseConnection.send), + recv = interp2app(W_BaseConnection.recv), ## poll = interp2app(W_BaseConnection.poll), ## fileno = interp2app(W_BaseConnection.fileno), - ## close = interp2app(W_BaseConnection.close), + close = interp2app(W_BaseConnection.close), ) class W_SocketConnection(W_BaseConnection): pass W_SocketConnection.typedef = TypeDef( - 'Connection', base_typedef + 'Connection', base_typedef, ) class W_PipeConnection(W_BaseConnection): - pass + if sys.platform == 'win32': + from pypy.rlib.rwin32 import INVALID_HANDLE_VALUE + + @unwrap_spec(ObjSpace, W_Root, W_Root, bool, bool) + def descr_new(space, w_subtype, w_handle, readable=True, writable=True): + from pypy.module._multiprocessing.interp_win32 import handle_w + handle = handle_w(space, w_handle) + flags = (readable and READABLE) | (writable and WRITABLE) + + self = 
space.allocate_instance(W_PipeConnection, w_subtype) + W_PipeConnection.__init__(self, handle, flags) + return space.wrap(self) + + def is_valid(self): + return self.handle != self.INVALID_HANDLE_VALUE + + def do_close(self): + from pypy.rlib.rwin32 import CloseHandle + if self.is_valid(): + CloseHandle(self.handle) + self.handle = self.INVALID_HANDLE_VALUE + + def do_send_string(self, space, buffer, offset, size): + from pypy.module._multiprocessing.interp_win32 import ( + _WriteFile, ERROR_NO_SYSTEM_RESOURCES) + from pypy.rlib import rwin32 + + charp = rffi.str2charp(buffer) + written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1, + flavor='raw') + try: + result = _WriteFile( + self.handle, rffi.ptradd(charp, offset), + size, written_ptr, rffi.NULL) + + if (result == 0 and + rwin32.GetLastError() == ERROR_NO_SYSTEM_RESOURCES): + raise operrfmt( + space.w_ValueError, + "Cannot send %ld bytes over connection", size) + finally: + rffi.free_charp(charp) + lltype.free(written_ptr, flavor='raw') + + def do_recv_string(self, space, maxlength): + from pypy.module._multiprocessing.interp_win32 import ( + _ReadFile) + from pypy.rlib import rwin32 + + read_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1, + flavor='raw') + left_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1, + flavor='raw') + try: + result = _ReadFile(self.handle, + self.buffer, min(self.BUFFER_SIZE, maxlength), + read_ptr, rffi.NULL) + if result: + return read_ptr[0], None + + err = rwin32.GetLastError() + if err == ERROR_BROKEN_PIPE: + return MP_END_OF_FILE + elif err != ERROR_MORE_DATA: + return MP_STANDARD_ERROR + + # More data... + if not _PeekNamedPipe(self.handle, rffi.NULL, 0, + rffi.NULL, rffi.NULL, left_ptr): + return MP_STANDARD_ERROR + + length = read_ptr[0] + left_ptr[0] + if length > maxlength: + return MP_BAD_MESSAGE_LENGTH + + newbuf = lltype.malloc(rffi.CCHARP.TO, length + 1, flavor='raw') + raw_memcopy(self.buffer, newbuf, read_ptr[0]) + + result = _ReadFile(self.handle, + rffi.ptradd(newbuf, read_ptr[0]), left_ptr[0], + read_ptr, rffi.NULL) + if result: + assert read_ptr[0] == left_ptr[0] + return length, newbuf + else: + rffi.free_charp(newbuf) + return MP_STANDARD_ERROR, None + finally: + lltype.free(read_ptr, flavor='raw') W_PipeConnection.typedef = TypeDef( - 'PipeConnection', base_typedef + 'PipeConnection', base_typedef, + __new__ = interp2app(W_PipeConnection.descr_new.im_func), ) Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Tue Sep 21 00:10:19 2010 @@ -14,6 +14,7 @@ PIPE_UNLIMITED_INSTANCES NMPWAIT_WAIT_FOREVER ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY + ERROR_NO_SYSTEM_RESOURCES """.split() class CConfig: @@ -25,9 +26,8 @@ for name in CONSTANTS: locals()[name] = rffi_platform.ConstantInteger(name) -class cConfig: - pass -cConfig.__dict__.update(rffi_platform.configure(CConfig)) +config = rffi_platform.configure(CConfig) +globals().update(config) def handle_w(space, w_handle): return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) @@ -58,6 +58,20 @@ rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], rwin32.HANDLE) +_WriteFile = rwin32.winexternal( + 'WriteFile', [ + rwin32.HANDLE, + rffi.VOIDP, rwin32.DWORD, + rwin32.LPDWORD, rffi.VOIDP], + rwin32.BOOL) + +_ReadFile = rwin32.winexternal( + 'ReadFile', [ + 
rwin32.HANDLE, + rffi.VOIDP, rwin32.DWORD, + rwin32.LPDWORD, rffi.VOIDP], + rwin32.BOOL) + _ExitProcess = rwin32.winexternal( 'ExitProcess', [rffi.UINT], lltype.Void) @@ -146,7 +160,7 @@ for name in CONSTANTS: space.setattr(w_win32, space.wrap(name), - space.wrap(getattr(cConfig, name))) + space.wrap(config[name])) space.setattr(w_win32, space.wrap('NULL'), space.newint(0)) Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Tue Sep 21 00:10:19 2010 @@ -14,6 +14,8 @@ #'_rawffi', # on win32 )) if sys.platform == "win32": + # stubs for the 'msvcrt' and '_subprocess' module, + # just for multiprocessing to import correctly. space = cls.space space.setitem(space.sys.get('modules'), space.wrap('msvcrt'), space.sys) Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_win32.py Tue Sep 21 00:10:19 2010 @@ -12,6 +12,12 @@ from _multiprocessing import win32 raises(WindowsError, win32.CloseHandle, -1) + def test_CreateFile(self): + from _multiprocessing import win32 + err = raises(WindowsError, win32.CreateFile, + "in/valid", 0, 0, 0, 0, 0, 0) + assert err.value.winerror == 87 # ERROR_INVALID_PARAMETER + def test_pipe(self): from _multiprocessing import win32 import os From afa at codespeak.net Tue Sep 21 00:13:06 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 00:13:06 +0200 (CEST) Subject: [pypy-svn] r77218 - pypy/branch/fast-forward/lib_pypy Message-ID: <20100920221306.5C593282BE9@codespeak.net> Author: afa Date: Tue Sep 21 00:13:05 2010 New Revision: 77218 Modified: pypy/branch/fast-forward/lib_pypy/struct.py Log: Build a list instead of a string slice so the .reverse() operation can work. 
Modified: pypy/branch/fast-forward/lib_pypy/struct.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/struct.py (original) +++ pypy/branch/fast-forward/lib_pypy/struct.py Tue Sep 21 00:13:05 2010 @@ -113,7 +113,7 @@ return ''.join(result) def unpack_float(data, index, size, le): - binary = data[index:index + 8] + binary = [data[i] for i in range(index, index + 8)] if le == "big": binary.reverse() unsigned = 0 From afa at codespeak.net Tue Sep 21 00:55:49 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 00:55:49 +0200 (CEST) Subject: [pypy-svn] r77219 - pypy/branch/fast-forward/pypy/module/_multiprocessing/test Message-ID: <20100920225549.C9BD4282BE9@codespeak.net> Author: afa Date: Tue Sep 21 00:55:48 2010 New Revision: 77219 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Log: Minor reformatting Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Tue Sep 21 00:55:48 2010 @@ -10,17 +10,14 @@ def setup_class(cls): if sys.platform != "win32": py.test.skip("win32 only") - cls.space = gettestobjspace(usemodules=('thread', '_multiprocessing', - #'_rawffi', # on win32 - )) + cls.space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) if sys.platform == "win32": - # stubs for the 'msvcrt' and '_subprocess' module, + # stubs for some modules, # just for multiprocessing to import correctly. space = cls.space - space.setitem(space.sys.get('modules'), - space.wrap('msvcrt'), space.sys) - space.setitem(space.sys.get('modules'), - space.wrap('_subprocess'), space.sys) + w_modules = space.sys.get('modules') + space.setitem(w_modules, space.wrap('msvcrt'), space.sys) + space.setitem(w_modules, space.wrap('_subprocess'), space.sys) def test_pipe_connection(self): import multiprocessing From arigo at codespeak.net Tue Sep 21 13:23:55 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 21 Sep 2010 13:23:55 +0200 (CEST) Subject: [pypy-svn] r77222 - pypy/trunk/pypy/jit/metainterp/test Message-ID: <20100921112355.F41EC282BF6@codespeak.net> Author: arigo Date: Tue Sep 21 13:23:54 2010 New Revision: 77222 Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Log: Add passing tests. 
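The tests below cover folding of overflow-checked arithmetic with a constant zero operand: int_add_ovf(x, 0), int_add_ovf(0, x) and int_sub_ovf(x, 0) can never overflow, so the optimizer may drop both the operation and the guard_no_overflow that follows it. The underlying fact is trivial but worth spelling out (plain CPython, illustrative only):

    import sys
    x = sys.maxint                                   # most positive machine-sized int
    assert x + 0 == x                                # adding zero never leaves the range,
    assert (-sys.maxint - 1) - 0 == -sys.maxint - 1  # and neither does subtracting it,
    # so the overflow guard that normally follows int_add_ovf/int_sub_ovf is
    # dead code in these cases and the result box simply aliases the input box.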
Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 21 13:23:54 2010 @@ -2206,6 +2206,43 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_fold_partially_constant_ops_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(0, i0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + # ---------- def make_fail_descr(self): From afa at codespeak.net Tue Sep 21 14:22:40 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 14:22:40 +0200 (CEST) Subject: [pypy-svn] r77223 - in pypy/branch/fast-forward/pypy/module/_multiprocessing: . test Message-ID: <20100921122240.72DEB282BF8@codespeak.net> Author: afa Date: Tue Sep 21 14:22:38 2010 New Revision: 77223 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Log: Implement part of _multiprocessing.Connection Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/__init__.py Tue Sep 21 14:22:38 2010 @@ -4,7 +4,7 @@ class Module(MixedModule): interpleveldefs = { - 'Connection' : 'interp_connection.W_SocketConnection', + 'Connection' : 'interp_connection.W_FileConnection', 'PipeConnection' : 'interp_connection.W_PipeConnection', } Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Tue Sep 21 14:22:38 2010 @@ -2,7 +2,8 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.rpython.lltypesystem import rffi, lltype -import sys +from pypy.rlib.rarithmetic import r_uint +import sys, os READABLE = 1 WRITABLE = 2 @@ -13,10 +14,8 @@ class W_BaseConnection(Wrappable): BUFFER_SIZE = 1024 - def __init__(self, handle, flags): - self.handle = handle + def __init__(self, flags): self.flags = flags - self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE, flavor='raw') @@ -24,12 +23,6 @@ lltype.free(self.buffer, flavor='raw') self.do_close() - def descr_repr(self, space): - conn_type = ["read-only", "write-only", "read-write"][self.flags] - - return space.wrap("<%s %s, handle %zd>" % ( - conn_type, space.type(self).getname(space, '?'), self.handle)) - def close(self): self.do_close() @@ -178,21 +171,110 @@ send = interp2app(W_BaseConnection.send), recv = interp2app(W_BaseConnection.recv), ## poll = 
interp2app(W_BaseConnection.poll), - ## fileno = interp2app(W_BaseConnection.fileno), close = interp2app(W_BaseConnection.close), ) -class W_SocketConnection(W_BaseConnection): - pass +class W_FileConnection(W_BaseConnection): + INVALID_HANDLE_VALUE = -1 + + def __init__(self, fd, flags): + W_BaseConnection.__init__(self, flags) + self.fd = fd + + @unwrap_spec(ObjSpace, W_Root, int, bool, bool) + def descr_new(space, w_subtype, fd, readable=True, writable=True): + flags = (readable and READABLE) | (writable and WRITABLE) + + self = space.allocate_instance(W_FileConnection, w_subtype) + W_FileConnection.__init__(self, fd, flags) + return space.wrap(self) + + @unwrap_spec('self', ObjSpace) + def fileno(self, space): + return space.wrap(self.fd) + + def is_valid(self): + return self.fd != self.INVALID_HANDLE_VALUE + + def do_close(self): + if self.is_valid(): + os.close(self.fd) + self.fd = self.INVALID_HANDLE_VALUE -W_SocketConnection.typedef = TypeDef( + def do_send_string(self, space, buffer, offset, size): + # Since str2charp copies the buffer anyway, always combine the + # "header" and the "body" of the message and send them at once. + message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') + try: + rffi.cast(rffi.UINTP, message)[0] = r_uint(size) # XXX htonl! + i = size - 1 + while i >= 0: + message[4 + i] = buffer[offset + i] + i -= 1 + self._sendall(space, message, size + 4) + finally: + lltype.free(message, flavor='raw') + + def do_recv_string(self, space, maxlength): + length_ptr = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 1, + flavor='raw') + self._recvall(rffi.cast(rffi.CCHARP, length_ptr), 4) + length = length_ptr[0] + if length > maxlength: + return MP_BAD_MESSAGE_LENGTH + + if length <= self.BUFFER_SIZE: + self._recvall(self.buffer, length) + return length, None + else: + newbuf = lltype.malloc(rffi.CCHARP.TO, length, flavor='raw') + self._recvall(newbuf, length) + return length, newbuf + + def _sendall(self, space, message, size): + while size > 0: + # XXX inefficient + data = rffi.charpsize2str(message, size) + try: + count = os.write(self.fd, data) + except OSError, e: + raise wrap_oserror(space, e) + size -= count + message = rffi.ptradd(message, count) + + def _recvall(self, buffer, length): + remaining = length + while remaining > 0: + try: + data = os.read(self.fd, remaining) + except OSError, e: + raise wrap_oserror(space, e) + count = len(data) + if count == 0: + if remaining == length: + return MP_END_OF_FILE + else: + return MP_EARLY_END_OF_FILE + # XXX inefficient + for i in range(count): + buffer[i] = data[i] + remaining -= count + buffer = rffi.ptradd(buffer, count) + +W_FileConnection.typedef = TypeDef( 'Connection', base_typedef, + __new__ = interp2app(W_FileConnection.descr_new.im_func), + fileno = interp2app(W_FileConnection.fileno), ) class W_PipeConnection(W_BaseConnection): if sys.platform == 'win32': from pypy.rlib.rwin32 import INVALID_HANDLE_VALUE + def __init__(self, handle, flags): + W_BaseConnection.__init__(self, flags) + self.handle = handle + @unwrap_spec(ObjSpace, W_Root, W_Root, bool, bool) def descr_new(space, w_subtype, w_handle, readable=True, writable=True): from pypy.module._multiprocessing.interp_win32 import handle_w @@ -203,9 +285,20 @@ W_PipeConnection.__init__(self, handle, flags) return space.wrap(self) + def descr_repr(self, space): + conn_type = ["read-only", "write-only", "read-write"][self.flags] + + return space.wrap("<%s %s, handle %zd>" % ( + conn_type, space.type(self).getname(space, '?'), self.do_fileno())) + def 
is_valid(self): return self.handle != self.INVALID_HANDLE_VALUE + @unwrap_spec('self', ObjSpace) + def fileno(self, space): + from pypy.module._multiprocessing.interp_win32 import w_handle + return w_handle(space, self.handle) + def do_close(self): from pypy.rlib.rwin32 import CloseHandle if self.is_valid(): @@ -283,4 +376,5 @@ W_PipeConnection.typedef = TypeDef( 'PipeConnection', base_typedef, __new__ = interp2app(W_PipeConnection.descr_new.im_func), + fileno = interp2app(W_PipeConnection.fileno), ) Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/test/test_connection.py Tue Sep 21 14:22:38 2010 @@ -8,21 +8,39 @@ class AppTestConnection: def setup_class(cls): - if sys.platform != "win32": - py.test.skip("win32 only") - cls.space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + cls.space = space if sys.platform == "win32": # stubs for some modules, # just for multiprocessing to import correctly. - space = cls.space w_modules = space.sys.get('modules') space.setitem(w_modules, space.wrap('msvcrt'), space.sys) space.setitem(w_modules, space.wrap('_subprocess'), space.sys) - def test_pipe_connection(self): + # import multiprocessing once + space.appexec([], """(): import multiprocessing""") + + def test_winpipe_connection(self): + import sys + if sys.platform != "win32": + skip("win32 only") + import multiprocessing + rhandle, whandle = multiprocessing.Pipe() + + obj = [1, 2.0, "hello"] + whandle.send(obj) + obj2 = rhandle.recv() + assert obj == obj2 + + def test_ospipe_connection(self): + import _multiprocessing + import os + fd1, fd2 = os.pipe() + rhandle = _multiprocessing.Connection(fd1, writable=False) + whandle = _multiprocessing.Connection(fd2, readable=False) + obj = [1, 2.0, "hello"] - whandle, rhandle = multiprocessing.Pipe() whandle.send(obj) obj2 = rhandle.recv() assert obj == obj2 From afa at codespeak.net Tue Sep 21 16:16:32 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 16:16:32 +0200 (CEST) Subject: [pypy-svn] r77227 - in pypy/branch/fast-forward: lib_pypy/_ctypes pypy/module/_rawffi Message-ID: <20100921141632.E239E282BFB@codespeak.net> Author: afa Date: Tue Sep 21 16:16:30 2010 New Revision: 77227 Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Log: Add FUNCFLAG_USE_ERRNO to _ctypes. Not used yet, but needed to import the module. 
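get_errno/set_errno are exposed on _rawffi so that the ctypes layer can later implement use_errno-style behaviour. A sketch of how application-level code could use them, assuming only the two functions added in the diff below (the surrounding foreign call is left out):

    import _rawffi

    _rawffi.set_errno(0)          # clear errno before the foreign call
    # ... call a C function through _rawffi here ...
    err = _rawffi.get_errno()     # read back whatever the C library set
    if err:
        raise OSError(err, "call failed")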
Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py Tue Sep 21 16:16:30 2010 @@ -4,7 +4,7 @@ from _ctypes.primitive import _SimpleCData from _ctypes.pointer import _Pointer, _cast_addr from _ctypes.function import CFuncPtr -from _ctypes.dll import dlopen +from _ctypes.dll import dlopen as LoadLibrary from _ctypes.structure import Structure from _ctypes.array import Array from _ctypes.builtin import _memmove_addr, _string_at, _memset_addr,\ @@ -19,6 +19,8 @@ CopyComPointer = None # XXX from _rawffi import FUNCFLAG_STDCALL, FUNCFLAG_CDECL, FUNCFLAG_PYTHONAPI +from _rawffi import FUNCFLAG_USE_ERRNO, FUNCFLAG_USE_LASTERROR +from _rawffi import get_errno, set_errno __version__ = '1.1.0' #XXX platform dependant? Modified: pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py Tue Sep 21 16:16:30 2010 @@ -25,6 +25,8 @@ 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', + 'get_errno' : 'interp_rawffi.get_errno', + 'set_errno' : 'interp_rawffi.set_errno', } appleveldefs = { @@ -41,6 +43,7 @@ from pypy.rlib import libffi for name in ['FUNCFLAG_STDCALL', 'FUNCFLAG_CDECL', 'FUNCFLAG_PYTHONAPI', + 'FUNCFLAG_USE_ERRNO', 'FUNCFLAG_USE_LASTERROR', ]: if hasattr(libffi, name): Module.interpleveldefs[name] = "space.wrap(%r)" % getattr(libffi, name) Modified: pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Tue Sep 21 16:16:30 2010 @@ -8,6 +8,7 @@ from pypy.rlib.libffi import * from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.unroll import unrolling_iterable +import pypy.rlib.rposix as rposix _MS_WINDOWS = os.name == "nt" @@ -505,3 +506,9 @@ return space.wrap(W_CDLL(space, get_libc_name())) except OSError, e: raise wrap_oserror(space, e) + +def get_errno(space): + return space.wrap(rposix.get_errno()) + +def set_errno(space, w_errno): + rposix.set_errno(space.int_w(w_errno)) From afa at codespeak.net Tue Sep 21 16:17:56 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 16:17:56 +0200 (CEST) Subject: [pypy-svn] r77228 - pypy/branch/fast-forward/pypy/rlib Message-ID: <20100921141756.001CF282BFB@codespeak.net> Author: afa Date: Tue Sep 21 16:17:55 2010 New Revision: 77228 Modified: pypy/branch/fast-forward/pypy/rlib/libffi.py Log: Belongs to the previous commit. 
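The two constants added below are bit flags with the same values CPython's ctypes uses, so they can be OR-ed into a function pointer's flags word. Illustrative only:

    # FUNCFLAG_CDECL == 1, FUNCFLAG_USE_ERRNO == 8, FUNCFLAG_USE_LASTERROR == 16
    flags = FUNCFLAG_CDECL | FUNCFLAG_USE_ERRNO       # == 9
    wants_errno = bool(flags & FUNCFLAG_USE_ERRNO)    # test one flag with a mask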
Modified: pypy/branch/fast-forward/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/libffi.py (original) +++ pypy/branch/fast-forward/pypy/rlib/libffi.py Tue Sep 21 16:17:55 2010 @@ -373,6 +373,8 @@ FUNCFLAG_STDCALL = 0 FUNCFLAG_CDECL = 1 # for WINAPI calls FUNCFLAG_PYTHONAPI = 4 +FUNCFLAG_USE_ERRNO = 8 +FUNCFLAG_USE_LASTERROR = 16 class AbstractFuncPtr(object): ll_cif = lltype.nullptr(FFI_CIFP.TO) From agaynor at codespeak.net Tue Sep 21 16:34:03 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Tue, 21 Sep 2010 16:34:03 +0200 (CEST) Subject: [pypy-svn] r77229 - pypy/extradoc/planning Message-ID: <20100921143403.14F4C282BFB@codespeak.net> Author: agaynor Date: Tue Sep 21 16:34:01 2010 New Revision: 77229 Modified: pypy/extradoc/planning/jit.txt Log: This is fixed. Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Tue Sep 21 16:34:01 2010 @@ -39,9 +39,6 @@ the guard recovert code (only if we are sure that the stored field cannot change) -- int_add_ovf(x, 0) guard_overflow is 20% of all int_add_ovf, not much - overall, but probably worth attacking - - if we move a promotion up the chain, some arguments don't get replaced with constants (those between current and previous locations). So we get like From afa at codespeak.net Tue Sep 21 17:44:59 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 17:44:59 +0200 (CEST) Subject: [pypy-svn] r77235 - in pypy/branch/fast-forward: lib_pypy/_ctypes pypy/module/_rawffi pypy/module/_rawffi/test pypy/module/test_lib_pypy/ctypes_tests pypy/rlib pypy/rpython/lltypesystem pypy/translator/c/src/libffi_msvc Message-ID: <20100921154459.CE14C282BFB@codespeak.net> Author: afa Date: Tue Sep 21 17:44:57 2010 New Revision: 77235 Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py pypy/branch/fast-forward/pypy/rlib/libffi.py pypy/branch/fast-forward/pypy/rlib/rarithmetic.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py pypy/branch/fast-forward/pypy/translator/c/src/libffi_msvc/pypy_ffi.c Log: Add rffi support for "long double" and "bool" primitive types. Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py Tue Sep 21 17:44:57 2010 @@ -2,7 +2,7 @@ import weakref import sys -SIMPLE_TYPE_CHARS = "cbBhHiIlLdfuzZqQPXOv" +SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?" 
from _ctypes.basics import _CData, _CDataMeta, cdata_from_address,\ CArgObject @@ -29,11 +29,13 @@ 'Q': 0, 'f': 0.0, 'd': 0.0, + 'g': 0.0, 'P': None, # not part of struct 'O': NULL, 'z': None, 'Z': None, + '?': False, } if sys.platform == 'win32': Modified: pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Tue Sep 21 17:44:57 2010 @@ -35,14 +35,16 @@ 'Q' : cast_type_to_ffitype(rffi.ULONGLONG), 'f' : ffi_type_float, 'd' : ffi_type_double, + 'g' : ffi_type_longdouble, 's' : ffi_type_pointer, 'P' : ffi_type_pointer, 'z' : ffi_type_pointer, 'O' : ffi_type_pointer, 'Z' : ffi_type_pointer, + '?' : cast_type_to_ffitype(lltype.Bool), } TYPEMAP_PTR_LETTERS = "POszZ" -TYPEMAP_NUMBER_LETTERS = "bBhHiIlLqQ" +TYPEMAP_NUMBER_LETTERS = "bBhHiIlLqQ?" if _MS_WINDOWS: TYPEMAP['X'] = ffi_type_pointer @@ -68,11 +70,13 @@ 'Q' : rffi.ULONGLONG, 'f' : rffi.FLOAT, 'd' : rffi.DOUBLE, + 'g' : rffi.LONGDOUBLE, 's' : rffi.CCHARP, 'z' : rffi.CCHARP, 'Z' : rffi.CArrayPtr(lltype.UniChar), 'O' : rffi.VOIDP, 'P' : rffi.VOIDP, + '?' : lltype.Bool, } if _MS_WINDOWS: @@ -353,7 +357,7 @@ return space.wrap(rffi.cast(lltype.Unsigned, res)) elif c == 'q' or c == 'Q' or c == 'L' or c == 'c' or c == 'u': return space.wrap(func(add_arg, argdesc, ll_type)) - elif c == 'f' or c == 'd': + elif c == 'f' or c == 'd' or c == 'g': return space.wrap(float(func(add_arg, argdesc, ll_type))) else: return space.wrap(intmask(func(add_arg, argdesc, ll_type))) Modified: pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/test/test__rawffi.py Tue Sep 21 17:44:57 2010 @@ -54,6 +54,7 @@ const char *static_str = "xxxxxx"; const long static_int = 42; const double static_double = 42.42; + const long double static_longdouble = 42.42; unsigned short add_shorts(short one, short two) { @@ -181,7 +182,7 @@ some_huge_value some_huge_uvalue pass_ll runcallback allocate_array - static_int static_double + static_int static_double static_longdouble sum_x_y give perturb get_s2a check_s2a AAA_first_ordinal_function @@ -817,6 +818,8 @@ assert a[0] == 42 a = getprimitive("d", "static_double") assert a[0] == 42.42 + a = getprimitive("g", "static_longdouble") + assert a[0] == 42.42 raises(ValueError, getprimitive, 'z', 'ddddddd') raises(ValueError, getprimitive, 'zzz', 'static_int') Modified: pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py (original) +++ pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py Tue Sep 21 17:44:57 2010 @@ -25,7 +25,7 @@ unsigned_types = [c_ubyte, c_ushort, c_uint, c_ulong] signed_types = [c_byte, c_short, c_int, c_long, c_longlong] -float_types = [c_double, c_float] +float_types = [c_double, c_float, c_longdouble] try: c_ulonglong Modified: pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py (original) +++ pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py Tue Sep 21 17:44:57 2010 @@ -57,6 +57,7 @@ "Q": c_ulonglong, "f": c_float, "d": c_double, + "g": c_longdouble, } def test_simple_structs(self): Modified: pypy/branch/fast-forward/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/libffi.py (original) +++ pypy/branch/fast-forward/pypy/rlib/libffi.py Tue Sep 21 17:44:57 2010 @@ -149,7 +149,7 @@ # ffi_type_slong and ffi_type_ulong are omitted because # their meaning changes too much from one libffi version to # another. DON'T USE THEM! use cast_type_to_ffitype(). - 'float', 'pointer', 'void', + 'float', 'longdouble', 'pointer', 'void', # by size 'sint8', 'uint8', 'sint16', 'uint16', 'sint32', 'uint32', 'sint64', 'uint64'] @@ -172,14 +172,16 @@ def _signed_type_for(TYPE): sz = rffi.sizeof(TYPE) - if sz == 2: return ffi_type_sint16 + if sz == 1: return ffi_type_sint8 + elif sz == 2: return ffi_type_sint16 elif sz == 4: return ffi_type_sint32 elif sz == 8: return ffi_type_sint64 else: raise ValueError("unsupported type size for %r" % (TYPE,)) def _unsigned_type_for(TYPE): sz = rffi.sizeof(TYPE) - if sz == 2: return ffi_type_uint16 + if sz == 1: return ffi_type_uint8 + elif sz == 2: return ffi_type_uint16 elif sz == 4: return ffi_type_uint32 elif sz == 8: return ffi_type_uint64 else: raise ValueError("unsupported type size for %r" % (TYPE,)) @@ -187,6 +189,7 @@ TYPE_MAP = { rffi.DOUBLE : ffi_type_double, rffi.FLOAT : ffi_type_float, + rffi.LONGDOUBLE : ffi_type_longdouble, rffi.UCHAR : ffi_type_uchar, rffi.CHAR : ffi_type_schar, rffi.SHORT : ffi_type_sshort, @@ -201,6 +204,7 @@ rffi.LONGLONG : _signed_type_for(rffi.LONGLONG), lltype.Void : ffi_type_void, lltype.UniChar : _unsigned_type_for(lltype.UniChar), + lltype.Bool : _unsigned_type_for(lltype.Bool), } def external(name, args, result, **kwds): Modified: pypy/branch/fast-forward/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rarithmetic.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rarithmetic.py Tue Sep 21 17:44:57 2010 @@ -610,6 +610,25 @@ def __cmp__(self, other): raise TypeError("not supported on r_singlefloat instances") +class r_longfloat(object): + """A value of the C type 'long double'. 
+ + Note that we consider this as a black box for now - the only thing + you can do with it is cast it back to a regular float.""" + + def __init__(self, floatval): + self.value = floatval + + def __float__(self): + return self.value + + def __nonzero__(self): + raise TypeError("not supported on r_longfloat instances") + + def __cmp__(self, other): + raise TypeError("not supported on r_longfloat instances") + + class For_r_singlefloat_values_Entry(extregistry.ExtRegistryEntry): _type_ = r_singlefloat Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py Tue Sep 21 17:44:57 2010 @@ -3,6 +3,9 @@ try: import ctypes import ctypes.util + + if not hasattr(ctypes, 'c_longdouble'): + ctypes.c_longdouble = ctypes.c_double except ImportError: ctypes = None @@ -18,7 +21,7 @@ from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic from pypy.tool.uid import fixid from pypy.tool.tls import tlsobject -from pypy.rlib.rarithmetic import r_uint, r_singlefloat, intmask +from pypy.rlib.rarithmetic import r_uint, r_singlefloat, r_longfloat, intmask from pypy.annotation import model as annmodel from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -37,6 +40,7 @@ lltype.Char: ctypes.c_ubyte, rffi.DOUBLE: ctypes.c_double, rffi.FLOAT: ctypes.c_float, + rffi.LONGDOUBLE: ctypes.c_longdouble, rffi.SIGNEDCHAR: ctypes.c_byte, rffi.UCHAR: ctypes.c_ubyte, rffi.SHORT: ctypes.c_short, @@ -782,6 +786,10 @@ if isinstance(cobj, ctypes.c_float): cobj = cobj.value llobj = r_singlefloat(cobj) + elif T is lltype.LongFloat: + if isinstance(cobj, ctypes.c_longdouble): + cobj = cobj.value + llobj = r_longfloat(cobj) elif T is lltype.Void: llobj = cobj else: Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/lltype.py Tue Sep 21 17:44:57 2010 @@ -4,8 +4,8 @@ import py from pypy.rlib.rarithmetic import (r_int, r_uint, intmask, r_singlefloat, - r_ulonglong, r_longlong, base_int, - normalizedinttype) + r_ulonglong, r_longlong, r_longfloat, + base_int, normalizedinttype) from pypy.rlib.objectmodel import Symbolic from pypy.tool.uid import Hashable from pypy.tool.tls import tlsobject @@ -624,6 +624,7 @@ Float = Primitive("Float", 0.0) # C type 'double' SingleFloat = Primitive("SingleFloat", r_singlefloat(0.0)) # C type 'float' +LongFloat = Primitive("LongFloat", r_longfloat(0.0)) # C type 'long double' r_singlefloat._TYPE = SingleFloat Char = Primitive("Char", '\x00') @@ -734,6 +735,8 @@ return build_number(None, tp) if tp is float: return Float + if tp is r_longfloat: + return LongFloat if tp is str: assert len(val) == 1 return Char @@ -765,6 +768,8 @@ elif ORIG == Float: if TGT == SingleFloat: return r_singlefloat(value) + elif TGT == LongFloat: + return r_longfloat(value) value = long(value) cast = _to_primitive.get(TGT) if cast is not None: @@ -773,6 +778,8 @@ return TGT._cast(value) if ORIG == SingleFloat and TGT == Float: return float(value) + if ORIG == LongFloat and TGT == Float: + return float(value) raise TypeError, "unsupported cast" def _cast_whatever(TGT, value): Modified: 
pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rffi.py Tue Sep 21 17:44:57 2010 @@ -542,6 +542,7 @@ # double DOUBLE = lltype.Float +LONGDOUBLE = lltype.LongFloat # float - corresponds to pypy.rlib.rarithmetic.r_float, and supports no # operation except rffi.cast() between FLOAT and DOUBLE Modified: pypy/branch/fast-forward/pypy/translator/c/src/libffi_msvc/pypy_ffi.c ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/src/libffi_msvc/pypy_ffi.c (original) +++ pypy/branch/fast-forward/pypy/translator/c/src/libffi_msvc/pypy_ffi.c Tue Sep 21 17:44:57 2010 @@ -10,11 +10,13 @@ typedef struct { char c; long long x; } s_long_long; typedef struct { char c; float x; } s_float; typedef struct { char c; double x; } s_double; +typedef struct { char c; long double x; } s_long_double; typedef struct { char c; void *x; } s_void_p; #define FLOAT_ALIGN (sizeof(s_float) - sizeof(float)) #define DOUBLE_ALIGN (sizeof(s_double) - sizeof(double)) #define LONG_LONG_ALIGN (sizeof(s_long_long) - sizeof(long long)) #define VOID_P_ALIGN (sizeof(s_void_p) - sizeof(void*)) +#define LONGDOUBLE_ALIGN (sizeof(s_long_double) - sizeof(long double)) /* align and size are bogus for void, but they must not be zero */ ffi_type ffi_type_void = { 1, 1, FFI_TYPE_VOID }; @@ -33,6 +35,7 @@ ffi_type ffi_type_float = { sizeof(float), FLOAT_ALIGN, FFI_TYPE_FLOAT }; ffi_type ffi_type_double = { sizeof(double), DOUBLE_ALIGN, FFI_TYPE_DOUBLE }; +ffi_type ffi_type_longdouble = { sizeof(long double), LONGDOUBLE_ALIGN, FFI_TYPE_LONGDOUBLE }; ffi_type ffi_type_pointer = { sizeof(void *), VOID_P_ALIGN, FFI_TYPE_POINTER }; From afa at codespeak.net Tue Sep 21 18:40:12 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 18:40:12 +0200 (CEST) Subject: [pypy-svn] r77241 - in pypy/trunk/pypy/rpython/lltypesystem: . test Message-ID: <20100921164012.70D7E282BFB@codespeak.net> Author: afa Date: Tue Sep 21 18:40:10 2010 New Revision: 77241 Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Log: Test and fix for a crash in test__rawffi.py: Copy the _storage pointer instead of simply reference another pointer, which may change without notice. 
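A minimal sketch of the aliasing being fixed, in plain CPython ctypes and with
illustrative names only: keeping the received pointer object means the container
follows any later retargeting of that pointer, while building a fresh pointer
from its .contents takes a stable snapshot, which is what the
type(cobj)(cobj.contents) lines below do.

    import ctypes

    x = ctypes.c_int(1)
    y = ctypes.c_int(2)
    p = ctypes.pointer(x)

    alias = p                          # same pointer object, shared
    copy = type(p)(p.contents)         # fresh pointer built from the current target

    p.contents = y                     # the original pointer is retargeted later
    assert alias.contents.value == 2   # the alias silently followed it
    assert copy.contents.value == 1    # the copy still refers to the old target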
Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Tue Sep 21 18:40:10 2010 @@ -734,10 +734,10 @@ elif isinstance(T.TO, lltype.Array): if T.TO._hints.get('nolength', False): container = _array_of_unknown_length(T.TO) - container._storage = cobj + container._storage = type(cobj)(cobj.contents) else: container = _array_of_known_length(T.TO) - container._storage = cobj + container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, lltype.FuncType): cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Tue Sep 21 18:40:10 2010 @@ -1252,6 +1252,32 @@ assert i == llmemory.cast_adr_to_int(a, "forced") lltype.free(p, flavor='raw') + def test_freelist(self): + S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed)) + SP = lltype.Ptr(S) + chunk = lltype.malloc(rffi.CArrayPtr(S).TO, 10, flavor='raw') + assert lltype.typeOf(chunk) == rffi.CArrayPtr(S) + free_list = lltype.nullptr(rffi.VOIDP.TO) + # build list + current = chunk + for i in range(10): + rffi.cast(rffi.VOIDPP, current)[0] = free_list + free_list = rffi.cast(rffi.VOIDP, current) + current = rffi.ptradd(current, 1) + # get one + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get two + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get three + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + lltype.free(chunk, flavor='raw') + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform From afa at codespeak.net Tue Sep 21 18:49:58 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 18:49:58 +0200 (CEST) Subject: [pypy-svn] r77242 - in pypy/branch/fast-forward: . 
pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/module/array/test pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test Message-ID: <20100921164958.BFF93282BFE@codespeak.net> Author: afa Date: Tue Sep 21 18:49:56 2010 New Revision: 77242 Modified: pypy/branch/fast-forward/ (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/ (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intutils.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py (props changed) pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/fast-forward/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Log: Merge from trunk Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 21 18:49:56 2010 @@ -2206,6 +2206,43 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_fold_partially_constant_ops_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(0, i0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + # ---------- def make_fail_descr(self): Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py Tue Sep 21 18:49:56 2010 @@ -738,10 +738,10 @@ elif isinstance(T.TO, lltype.Array): if T.TO._hints.get('nolength', False): container = _array_of_unknown_length(T.TO) - container._storage = cobj + container._storage = type(cobj)(cobj.contents) else: container = _array_of_known_length(T.TO) - container._storage = cobj + container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, lltype.FuncType): cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Tue Sep 21 18:49:56 2010 @@ -1252,6 +1252,32 @@ assert i == llmemory.cast_adr_to_int(a, "forced") lltype.free(p, flavor='raw') 
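# (Loose sketch of what the test added below exercises: it threads an intrusive
# free list through a raw CArray by storing each chunk's "next" pointer in its
# first word, so every step re-derives a pointer from memory that was just
# rewritten -- the situation where an aliased _storage, rather than the copy
# made via type(cobj)(cobj.contents) above, could change without notice.)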
+ def test_freelist(self): + S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed)) + SP = lltype.Ptr(S) + chunk = lltype.malloc(rffi.CArrayPtr(S).TO, 10, flavor='raw') + assert lltype.typeOf(chunk) == rffi.CArrayPtr(S) + free_list = lltype.nullptr(rffi.VOIDP.TO) + # build list + current = chunk + for i in range(10): + rffi.cast(rffi.VOIDPP, current)[0] = free_list + free_list = rffi.cast(rffi.VOIDP, current) + current = rffi.ptradd(current, 1) + # get one + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get two + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get three + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + lltype.free(chunk, flavor='raw') + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform From hakanardo at codespeak.net Tue Sep 21 19:02:12 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Tue, 21 Sep 2010 19:02:12 +0200 (CEST) Subject: [pypy-svn] r77243 - in pypy/branch/jit-loop-invaraints/pypy: jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test jit/tl module/pypyjit/test Message-ID: <20100921170212.F18A3282BFE@codespeak.net> Author: hakanardo Date: Tue Sep 21 19:02:10 2010 New Revision: 77243 Added: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resoperation.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py Log: Move some always_pure loop invariant operations out of loops Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py Tue Sep 21 19:02:10 2010 @@ -55,6 +55,8 @@ history = metainterp.history loop = create_empty_loop(metainterp) loop.greenkey = greenkey + loop.preamble = create_empty_loop(metainterp) + loop.preamble.greenkey = greenkey loop.inputargs = history.inputargs for box in loop.inputargs: assert isinstance(box, Box) @@ -65,6 +67,7 @@ jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token + loop.preamble.token = make_loop_token(0, jitdriver_sd) loop.operations[-1].descr = loop_token # patch the target of the JUMP try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( @@ -74,9 +77,16 @@ if old_loop_token is not None: metainterp.staticdata.log("reusing old loop") return old_loop_token - send_loop_to_backend(metainterp_sd, loop, "loop") - insert_loop_token(old_loop_tokens, loop_token) - return loop_token + + if loop.preamble.operations: + send_loop_to_backend(metainterp_sd, loop, "loop") + send_loop_to_backend(metainterp_sd, loop.preamble, "loop") + 
insert_loop_token(old_loop_tokens, loop.preamble.token) + return loop.preamble.token + else: + send_loop_to_backend(metainterp_sd, loop, "loop") + insert_loop_token(old_loop_tokens, loop_token) + return loop_token def insert_loop_token(old_loop_tokens, loop_token): # Find where in old_loop_tokens we should insert this new loop_token. Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py Tue Sep 21 19:02:10 2010 @@ -714,6 +714,7 @@ inputargs = None operations = None token = None + preamble = None def __init__(self, name): self.name = name Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py Tue Sep 21 19:02:10 2010 @@ -3,6 +3,7 @@ from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap +from pypy.jit.metainterp.optimizeopt.invariant import OptInvariant def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -14,7 +15,9 @@ OptRewrite(), OptVirtualize(), OptHeap(), - ] + OptInvariant(), + ] + optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) optimizer.propagate_all_forward() Added: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- (empty file) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Tue Sep 21 19:02:10 2010 @@ -0,0 +1,65 @@ +from pypy.jit.metainterp.optimizeopt.optimizer import * +from pypy.jit.metainterp.resoperation import rop, ResOperation + +class OptInvariant(Optimization): + """Move loop invariant code into a preamble. 
+ """ + def setup(self, virtuals): + if not virtuals: + return + + inputargs = self.optimizer.original_inputargs + if not inputargs: + return + + jump_op = self.optimizer.loop.operations[-1] + assert(jump_op.opnum == rop.JUMP) + #for arg_in, arg_out in zip(inputargs, jump_op.args): + print + print inputargs, jump_op.args + for i in range(len(inputargs)): + arg_in, arg_out = inputargs[i], jump_op.args[i] + if arg_in is arg_out: + print "Invariant: ", arg_in + v = self.getvalue(arg_in) + v.invariant = True + self.invariant_boxes = [] + + def propagate_forward(self, op): + + if op.opnum == rop.JUMP: + loop = self.optimizer.loop + if loop.preamble and len(self.optimizer.preamble)>0: + preamble = loop.preamble + preamble.inputargs = loop.inputargs[:] + loop.inputargs.extend(self.invariant_boxes) + op.args = op.args + self.invariant_boxes + preamble.operations = self.optimizer.preamble + preamble.token.specnodes = loop.token.specnodes + jmp = ResOperation(rop.JUMP, + loop.inputargs[:], + None) + jmp.descr = loop.token + preamble.operations.append(jmp) + + elif op.is_always_pure(): + for a in op.args: + if self.get_constant_box(a) is None: + if a not in self.optimizer.values: + break + v = self.getvalue(a) + if not v.invariant: + break + else: + print "P: ", op, op.opnum + op.invariant = True + self.emit_operation(op) + if self.get_constant_box(op.result) is None: + v = self.getvalue(op.result) + v.invariant = True + box = v.force_box() + if box not in self.invariant_boxes: + self.invariant_boxes.append(box) + return + + self.emit_operation(op) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py Tue Sep 21 19:02:10 2010 @@ -23,12 +23,14 @@ MININT = -sys.maxint - 1 class OptValue(object): - _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') + _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', + 'invariant') last_guard_index = -1 level = LEVEL_UNKNOWN known_class = None intbound = None + invariant = False def __init__(self, box): self.box = box @@ -199,6 +201,9 @@ self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] + self.preamble = [] + + self.original_inputargs = self.loop.inputargs if optimizations: self.first_optimization = optimizations[0] @@ -336,7 +341,10 @@ self.exception_might_have_happened = True elif op.returns_bool_result(): self.bool_boxes[self.getvalue(op.result)] = None - self.newoperations.append(op) + if op.invariant: + self.preamble.append(op) + else: + self.newoperations.append(op) def store_final_boxes_in_guard(self, op): ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resoperation.py Tue Sep 21 19:02:10 2010 @@ -3,7 +3,6 @@ class ResOperation(object): """The central ResOperation class, representing one operation.""" - # for 'guard_*' fail_args = None @@ -11,6 +10,8 @@ name = "" pc = 0 + invariant = False + def __init__(self, opnum, args, result, descr=None): make_sure_not_resized(args) assert 
isinstance(opnum, int) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Tue Sep 21 19:02:10 2010 @@ -303,6 +303,48 @@ found += 1 assert found == 1 + def test_loop_invariant_mul(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 252 + self.check_loop_count(2) + self.check_loops({'guard_true': 1, + 'int_add': 1, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, + 'jump': 2}) + + def test_loop_invariant_intbox(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + class I: + __slots__ = 'intval' + _immutable_ = True + def __init__(self, intval): + self.intval = intval + def f(i, y): + res = 0 + x = I(i) + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x.intval * x.intval + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 252 + self.check_loop_count(2) + self.check_loops({'guard_true': 1, + 'int_add': 1, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, 'getfield_gc_pure': 1, + 'jump': 2}) + def test_loops_are_transient(self): import gc, weakref myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 21 19:02:10 2010 @@ -8,7 +8,8 @@ import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1 from pypy.jit.metainterp.optimizeutil import InvalidLoop -from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt +from pypy.jit.metainterp.history import (AbstractDescr, ConstInt, BoxInt, + TreeLoop, LoopToken) from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -255,6 +256,8 @@ metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo + loop.preamble = TreeLoop('preamble') + loop.preamble.token = LoopToken() optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) @@ -3838,6 +3841,35 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_loop_invariant_simple(self): + ops = """ + [i0, i1] + i2 = int_add(i0, 1) + i3 = int_add(i2, i1) + jump(i0, i3) + """ + expected = """ + [i0, i1, i2] + i3 = int_add(i2, i1) + jump(i0, i3, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_loop_invariant_getfield(self): + ops = """ + [p0, i1] + i0 = getfield_gc_pure(p0, descr=valuedescr) + i2 = int_add(i0, 1) + i3 = int_add(i2, i1) + jump(p0, i3) + """ + expected = """ + [p0, i1, i0, i2] + i3 = int_add(i2, i1) + jump(p0, i3, i0, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) + Modified: 
pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py Tue Sep 21 19:02:10 2010 @@ -34,6 +34,28 @@ self.check_loops(new=0, new_with_vtable=0, getfield_gc=0, setfield_gc=0) + def test_virtualized_and_invariant(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node']) + def f(n, sa): + node = self._new() + node.value = 0 + node.extra = 0 + while n > 0: + myjitdriver.can_enter_jit(n=n, node=node, sa=sa) + myjitdriver.jit_merge_point(n=n, node=node, sa=sa) + next = self._new() + next.value = node.value + n + next.extra = node.extra + sa * sa + node = next + n -= 1 + return node.value * node.extra + assert f(10, 1) == 55 * 10 + res = self.meta_interp(f, [10, 1]) + assert res == 55 * 10 + self.check_loop_count(2) + self.check_loops(new=0, new_with_vtable=0, + getfield_gc=0, setfield_gc=0) + def test_virtualized_float(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) def f(n): Modified: pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py Tue Sep 21 19:02:10 2010 @@ -37,29 +37,67 @@ ## t2 = time.time() ## print t2 - t1 +## try: +## from array import array + +## def coords(w,h): +## y = 0 +## while y < h: +## x = 0 +## while x < w: +## yield x,y +## x += 1 +## y += 1 + +## def f(img): +## sa=0 +## for x, y in coords(4,4): +## sa += x * y +## return sa + +## #img=array('h',(1,2,3,4)) +## print f(3) +## except Exception, e: +## print "Exception: ", type(e) +## print e + +## try: + +## def f(): +## i = 0 +## a = 2 +## sa = 0 +## while i < 5: +## sa += a*a +## i += 1 +## return sa + +## print f() + +## except Exception, e: +## print "Exception: ", type(e) +## print e + + try: + from array import array - def coords(w,h): - y = 0 - while y < h: - x = 0 - while x < w: - yield x,y - x += 1 - y += 1 - - def f(img): - sa=0 - for x, y in coords(4,4): - sa += x * y + def f(): + i = 0 + a = 1 + sa = array('d', (0,0)) + while i < 500000000: + sa[0] += a*a + i += 1 return sa - #img=array('h',(1,2,3,4)) - print f(3) + print f() + except Exception, e: print "Exception: ", type(e) print e + ## def f(): ## a=7 Modified: pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py Tue Sep 21 19:02:10 2010 @@ -1113,6 +1113,32 @@ return sa ''', 88, ([], 1997001)) + def test_invariant_mul(self): + self.run_source(''' + def main(a): + i = 0 + sa = 0 + while i < 2000: + sa += a*a + i += 1 + return sa + ''', 34, ([2], 8000)) + + def test_invariant_mul_bridge(self): + self.run_source(''' + def main(a): + i = 0 + sa = 0 + while i < 4000: + sa += a*a + if i > 2000: + a = 7 + i += 1 + return sa + ''', 61, ([2], 105910)) + + + # test_circular class AppTestJIT(PyPyCJITTests): From afa at codespeak.net Tue Sep 21 19:07:54 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 19:07:54 +0200 (CEST) Subject: [pypy-svn] r77246 - 
pypy/branch/fast-forward/lib_pypy/_ctypes Message-ID: <20100921170754.61668282BFB@codespeak.net> Author: afa Date: Tue Sep 21 19:07:52 2010 New Revision: 77246 Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py pypy/branch/fast-forward/lib_pypy/_ctypes/pointer.py Log: ctypes.POINTER was rewritten in C "for performance". For pypy, we simply copy the old python code... Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py Tue Sep 21 19:07:52 2010 @@ -3,6 +3,7 @@ ArgumentError, COMError from _ctypes.primitive import _SimpleCData from _ctypes.pointer import _Pointer, _cast_addr +from _ctypes.pointer import POINTER, pointer, _pointer_type_cache from _ctypes.function import CFuncPtr from _ctypes.dll import dlopen as LoadLibrary from _ctypes.structure import Structure Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/pointer.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/pointer.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/pointer.py Tue Sep 21 19:07:52 2010 @@ -5,6 +5,9 @@ from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\ array_slice_setitem +# This cache maps types to pointers to them. +_pointer_type_cache = {} + DEFAULT_VALUE = object() class PointerType(_CDataMeta): @@ -136,3 +139,26 @@ result._buffer[0] = obj._buffer[0] return result + +def POINTER(cls): + try: + return _pointer_type_cache[cls] + except KeyError: + pass + if type(cls) is str: + klass = type(_Pointer)("LP_%s" % cls, + (_Pointer,), + {}) + _pointer_type_cache[id(klass)] = klass + return klass + else: + name = "LP_%s" % cls.__name__ + klass = type(_Pointer)(name, + (_Pointer,), + {'_type_': cls}) + _pointer_type_cache[cls] = klass + return klass + +def pointer(inst): + return POINTER(type(inst))(inst) + From afa at codespeak.net Tue Sep 21 19:30:27 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 19:30:27 +0200 (CEST) Subject: [pypy-svn] r77249 - in pypy/branch/fast-forward: lib_pypy/_ctypes pypy/module/_rawffi Message-ID: <20100921173027.BBC38282BFB@codespeak.net> Author: afa Date: Tue Sep 21 19:30:26 2010 New Revision: 77249 Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Log: Export _ctypes.get_last_error, set_last_error (on Windows) Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py Tue Sep 21 19:30:26 2010 @@ -23,6 +23,9 @@ from _rawffi import FUNCFLAG_USE_ERRNO, FUNCFLAG_USE_LASTERROR from _rawffi import get_errno, set_errno +if _os.name in ("nt", "ce"): + from _rawffi import get_last_error, set_last_error + __version__ = '1.1.0' #XXX platform dependant? 
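# (Quick note on the pair imported just above, as wired up in this commit:
# _rawffi.get_last_error/set_last_error simply wrap rwin32.GetLastError and
# rwin32.SetLastError, so a caller can do set_last_error(0) before a foreign
# call and err = get_last_error() afterwards; the interp_rawffi.py hunk below
# shows the two wrappers.)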
RTLD_LOCAL = 0 Modified: pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/__init__.py Tue Sep 21 19:30:26 2010 @@ -6,6 +6,7 @@ from pypy.module._rawffi.interp_rawffi import W_CDLL from pypy.rpython.lltypesystem import lltype, rffi from pypy.module._rawffi.tracker import Tracker +import sys class Module(MixedModule): @@ -29,6 +30,10 @@ 'set_errno' : 'interp_rawffi.set_errno', } + if sys.platform == 'win32': + interpleveldefs['get_last_error'] = 'interp_rawffi.get_last_error' + interpleveldefs['set_last_error'] = 'interp_rawffi.set_last_error' + appleveldefs = { 'SegfaultException' : 'error.SegfaultException', } Modified: pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Tue Sep 21 19:30:26 2010 @@ -516,3 +516,11 @@ def set_errno(space, w_errno): rposix.set_errno(space.int_w(w_errno)) + +def get_last_error(space): + from pypy.rlib.rwin32 import GetLastError + return space.wrap(GetLastError()) + +def get_last_error(space, w_error): + from pypy.rlib.rwin32 import SetLastError + SetLastError(space.uint_w(w_error)) From afa at codespeak.net Tue Sep 21 19:34:57 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 19:34:57 +0200 (CEST) Subject: [pypy-svn] r77250 - pypy/branch/fast-forward/pypy/module/_rawffi Message-ID: <20100921173457.106E1282BFB@codespeak.net> Author: afa Date: Tue Sep 21 19:34:55 2010 New Revision: 77250 Modified: pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Log: typo Modified: pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py (original) +++ pypy/branch/fast-forward/pypy/module/_rawffi/interp_rawffi.py Tue Sep 21 19:34:55 2010 @@ -521,6 +521,6 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def get_last_error(space, w_error): +def set_last_error(space, w_error): from pypy.rlib.rwin32 import SetLastError SetLastError(space.uint_w(w_error)) From fijal at codespeak.net Tue Sep 21 20:35:31 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Tue, 21 Sep 2010 20:35:31 +0200 (CEST) Subject: [pypy-svn] r77251 - pypy/benchmarks/own Message-ID: <20100921183531.819AD282BFB@codespeak.net> Author: fijal Date: Tue Sep 21 20:35:30 2010 New Revision: 77251 Removed: pypy/benchmarks/own/aes.py pypy/benchmarks/own/crypto_slowaes.py Log: Remove old aes From fijal at codespeak.net Tue Sep 21 20:36:38 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Tue, 21 Sep 2010 20:36:38 +0200 (CEST) Subject: [pypy-svn] r77252 - pypy/benchmarks/own Message-ID: <20100921183638.0D67F282C00@codespeak.net> Author: fijal Date: Tue Sep 21 20:36:37 2010 New Revision: 77252 Added: pypy/benchmarks/own/crypto_pyaes.py pypy/benchmarks/own/pyaes.py Log: Add new versions of those benchmarks Added: pypy/benchmarks/own/crypto_pyaes.py ============================================================================== --- (empty file) +++ pypy/benchmarks/own/crypto_pyaes.py Tue Sep 21 20:36:37 2010 @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- 
coding: utf-8 -*- +import util +import optparse +import time + +import pyaes + +cleartext = "This is a test. What could possibly go wrong? " * 2000 # 92000 bytes + +def benchmark(): + # 128-bit key + key = 'a1f6258c877d5fcd8964484538bfc92c'.decode('hex') + iv = 'ed62e16363638360fdd6ad62112794f0'.decode('hex') + + aes = pyaes.new(key, pyaes.MODE_CBC, iv) + ciphertext = aes.encrypt(cleartext) + + # need to reset IV for decryption + aes = pyaes.new(key, pyaes.MODE_CBC, iv) + plaintext = aes.decrypt(ciphertext) + + assert plaintext == cleartext + +def main(arg): + # XXX warmup + + times = [] + for i in xrange(arg): + t0 = time.time() + o = benchmark() + tk = time.time() + times.append(tk - t0) + return times + +if __name__ == "__main__": + parser = optparse.OptionParser( + usage="%prog [options]", + description="Test the performance of the SlowAES cipher benchmark") + util.add_standard_options_to(parser) + options, args = parser.parse_args() + + util.run_benchmark(options, options.num_runs, main) Added: pypy/benchmarks/own/pyaes.py ============================================================================== --- (empty file) +++ pypy/benchmarks/own/pyaes.py Tue Sep 21 20:36:37 2010 @@ -0,0 +1,498 @@ +"""Simple AES cipher implementation in pure Python following PEP-272 API + +Homepage: https://bitbucket.org/intgr/pyaes/ + +The goal of this module is to be as fast as reasonable in Python while still +being Pythonic and readable/understandable. It is licensed under the permissive +MIT license. + +Hopefully the code is readable and commented enough that it can serve as an +introduction to the AES cipher for Python coders. In fact, it should go along +well with the Stick Figure Guide to AES: +http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html + +Contrary to intuition, this implementation numbers the 4x4 matrices from top to +bottom for efficiency reasons:: + + 0 4 8 12 + 1 5 9 13 + 2 6 10 14 + 3 7 11 15 + +Effectively it's the transposition of what you'd expect. This actually makes +the code simpler -- except the ShiftRows step, but hopefully the explanation +there clears it up. + +""" + +#### +# Copyright (c) 2010 Marti Raudsepp +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+#### + + +from array import array + +# Globals mandated by PEP 272: +# http://www.python.org/dev/peps/pep-0272/ +MODE_ECB = 1 +MODE_CBC = 2 +#MODE_CTR = 6 + +block_size = 16 +key_size = None + +def new(key, mode, IV=None): + if mode == MODE_ECB: + return ECBMode(AES(key)) + elif mode == MODE_CBC: + if IV is None: + raise ValueError, "CBC mode needs an IV value!" + + return CBCMode(AES(key), IV) + else: + raise NotImplementedError + +#### AES cipher implementation + +class AES(object): + block_size = 16 + + def __init__(self, key): + self.setkey(key) + + def setkey(self, key): + """Sets the key and performs key expansion.""" + + self.key = key + self.key_size = len(key) + + if self.key_size == 16: + self.rounds = 10 + elif self.key_size == 24: + self.rounds = 12 + elif self.key_size == 32: + self.rounds = 14 + else: + raise ValueError, "Key length must be 16, 24 or 32 bytes" + + self.expand_key() + + def expand_key(self): + """Performs AES key expansion on self.key and stores in self.exkey""" + + # The key schedule specifies how parts of the key are fed into the + # cipher's round functions. "Key expansion" means performing this + # schedule in advance. Almost all implementations do this. + # + # Here's a description of AES key schedule: + # http://en.wikipedia.org/wiki/Rijndael_key_schedule + + # The expanded key starts with the actual key itself + exkey = array('B', self.key) + + # extra key expansion steps + if self.key_size == 16: + extra_cnt = 0 + elif self.key_size == 24: + extra_cnt = 2 + else: + extra_cnt = 3 + + # 4-byte temporary variable for key expansion + word = exkey[-4:] + # Each expansion cycle uses 'i' once for Rcon table lookup + for i in xrange(1, 11): + + #### key schedule core: + # left-rotate by 1 byte + word = word[1:4] + word[0:1] + + # apply S-box to all bytes + for j in xrange(4): + word[j] = aes_sbox[word[j]] + + # apply the Rcon table to the leftmost byte + word[0] = word[0] ^ aes_Rcon[i] + #### end key schedule core + + for z in xrange(4): + for j in xrange(4): + # mix in bytes from the last subkey + word[j] ^= exkey[-self.key_size + j] + exkey.extend(word) + + # Last key expansion cycle always finishes here + if len(exkey) >= (self.rounds+1) * self.block_size: + break + + # Special substitution step for 256-bit key + if self.key_size == 32: + for j in xrange(4): + # mix in bytes from the last subkey XORed with S-box of + # current word bytes + word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j] + exkey.extend(word) + + # Twice for 192-bit key, thrice for 256-bit key + for z in xrange(extra_cnt): + for j in xrange(4): + # mix in bytes from the last subkey + word[j] ^= exkey[-self.key_size + j] + exkey.extend(word) + + self.exkey = exkey + + def add_round_key(self, block, round): + """AddRoundKey step in AES. This is where the key is mixed into plaintext""" + + offset = round * 16 + exkey = self.exkey + + for i in xrange(16): + block[i] ^= exkey[offset + i] + + #print 'AddRoundKey:', block + + def sub_bytes(self, block, sbox): + """SubBytes step, apply S-box to all bytes + + Depending on whether encrypting or decrypting, a different sbox array + is passed in. + """ + + for i in xrange(16): + block[i] = sbox[block[i]] + + #print 'SubBytes :', block + + def shift_rows(self, b): + """ShiftRows step. 
Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3 + + Since we're performing this on a transposed matrix, cells are numbered + from top to bottom:: + + 0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change + 1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around) + 2 6 10 14 -> 10 14 2 6 -- shifted by 2 + 3 7 11 15 -> 15 3 7 11 -- shifted by 3 + """ + + b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1] + b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6] + b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11] + + #print 'ShiftRows :', b + + def shift_rows_inv(self, b): + """Similar to shift_rows above, but performed in inverse for decryption.""" + + b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13] + b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14] + b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15] + + #print 'ShiftRows :', b + + def mix_columns(self, block): + """MixColumns step. Mixes the values in each column""" + + # Cache global multiplication tables (see below) + mul_by_2 = gf_mul_by_2 + mul_by_3 = gf_mul_by_3 + + # Since we're dealing with a transposed matrix, columns are already + # sequential + for i in xrange(4): + col = i * 4 + + v0, v1, v2, v3 = block[col : col+4] + + block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1] + block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2] + block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3] + block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0] + + #print 'MixColumns :', block + + def mix_columns_inv(self, block): + """Similar to mix_columns above, but performed in inverse for decryption.""" + + # Cache global multiplication tables (see below) + mul_9 = gf_mul_by_9 + mul_11 = gf_mul_by_11 + mul_13 = gf_mul_by_13 + mul_14 = gf_mul_by_14 + + # Since we're dealing with a transposed matrix, columns are already + # sequential + for i in xrange(4): + col = i * 4 + + v0, v1, v2, v3 = block[col : col+4] + + block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1] + block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2] + block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3] + block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0] + + #print 'MixColumns :', block + + def encrypt_block(self, block): + """Encrypts a single block. This is the main AES function""" + + # For efficiency reasons, the state between steps is transmitted via a + # mutable array, not returned. + self.add_round_key(block, 0) + + for round in xrange(1, self.rounds): + self.sub_bytes(block, aes_sbox) + self.shift_rows(block) + self.mix_columns(block) + self.add_round_key(block, round) + + self.sub_bytes(block, aes_sbox) + self.shift_rows(block) + # no mix_columns step in the last round + self.add_round_key(block, self.rounds) + + def decrypt_block(self, block): + """Decrypts a single block. This is the main AES decryption function""" + + # For efficiency reasons, the state between steps is transmitted via a + # mutable array, not returned. + self.add_round_key(block, self.rounds) + + # count rounds down from 15 ... 1 + for round in xrange(self.rounds-1, 0, -1): + self.shift_rows_inv(block) + self.sub_bytes(block, aes_inv_sbox) + self.add_round_key(block, round) + self.mix_columns_inv(block) + + self.shift_rows_inv(block) + self.sub_bytes(block, aes_inv_sbox) + self.add_round_key(block, 0) + # no mix_columns step in the last round + + +#### ECB mode implementation + +class ECBMode(object): + """Electronic CodeBook (ECB) mode encryption. 
+ + Basically this mode applies the cipher function to each block individually; + no feedback is done. NB! This is insecure for almost all purposes + """ + + def __init__(self, cipher): + self.cipher = cipher + self.block_size = cipher.block_size + + def ecb(self, data, block_func): + """Perform ECB mode with the given function""" + + if len(data) % self.block_size != 0: + raise ValueError, "Plaintext length must be multiple of 16" + + block_size = self.block_size + data = array('B', data) + + for offset in xrange(0, len(data), block_size): + block = data[offset : offset+block_size] + block_func(block) + data[offset : offset+block_size] = block + + return data.tostring() + + def encrypt(self, data): + """Encrypt data in ECB mode""" + + return self.ecb(data, self.cipher.encrypt_block) + + def decrypt(self, data): + """Decrypt data in ECB mode""" + + return self.ecb(data, self.cipher.decrypt_block) + +#### CBC mode + +class CBCMode(object): + """Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks. + + In CBC encryption, each plaintext block is XORed with the ciphertext block + preceding it; decryption is simply the inverse. + """ + + # A better explanation of CBC can be found here: + # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29 + + def __init__(self, cipher, IV): + self.cipher = cipher + self.block_size = cipher.block_size + self.IV = array('B', IV) + + def encrypt(self, data): + """Encrypt data in CBC mode""" + + block_size = self.block_size + if len(data) % block_size != 0: + raise ValueError, "Plaintext length must be multiple of 16" + + data = array('B', data) + IV = self.IV + + for offset in xrange(0, len(data), block_size): + block = data[offset : offset+block_size] + + # Perform CBC chaining + for i in xrange(block_size): + block[i] ^= IV[i] + + self.cipher.encrypt_block(block) + data[offset : offset+block_size] = block + IV = block + + self.IV = IV + return data.tostring() + + def decrypt(self, data): + """Decrypt data in CBC mode""" + + block_size = self.block_size + if len(data) % block_size != 0: + raise ValueError, "Ciphertext length must be multiple of 16" + + data = array('B', data) + IV = self.IV + + for offset in xrange(0, len(data), block_size): + ctext = data[offset : offset+block_size] + block = ctext[:] + self.cipher.decrypt_block(block) + + # Perform CBC chaining + #for i in xrange(block_size): + # data[offset + i] ^= IV[i] + for i in xrange(block_size): + block[i] ^= IV[i] + data[offset : offset+block_size] = block + + IV = ctext + #data[offset : offset+block_size] = block + + self.IV = IV + return data.tostring() + +#### + +def galois_multiply(a, b): + """Galois Field multiplicaiton for AES""" + p = 0 + while b: + if b & 1: + p ^= a + a <<= 1 + if a & 0x100: + a ^= 0x1b + b >>= 1 + + return p & 0xff + +# Precompute the multiplication tables for encryption +gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)]) +gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)]) +# ... for decryption +gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)]) +gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)]) +gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)]) +gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)]) + +#### + +# The S-box is a 256-element array, that maps a single byte value to another +# byte value. 
Since it's designed to be reversible, each value occurs only once +# in the S-box +# +# More information: http://en.wikipedia.org/wiki/Rijndael_S-box + +aes_sbox = array('B', + '637c777bf26b6fc53001672bfed7ab76' + 'ca82c97dfa5947f0add4a2af9ca472c0' + 'b7fd9326363ff7cc34a5e5f171d83115' + '04c723c31896059a071280e2eb27b275' + '09832c1a1b6e5aa0523bd6b329e32f84' + '53d100ed20fcb15b6acbbe394a4c58cf' + 'd0efaafb434d338545f9027f503c9fa8' + '51a3408f929d38f5bcb6da2110fff3d2' + 'cd0c13ec5f974417c4a77e3d645d1973' + '60814fdc222a908846eeb814de5e0bdb' + 'e0323a0a4906245cc2d3ac629195e479' + 'e7c8376d8dd54ea96c56f4ea657aae08' + 'ba78252e1ca6b4c6e8dd741f4bbd8b8a' + '703eb5664803f60e613557b986c11d9e' + 'e1f8981169d98e949b1e87e9ce5528df' + '8ca1890dbfe6426841992d0fb054bb16'.decode('hex') +) + +# This is the inverse of the above. In other words: +# aes_inv_sbox[aes_sbox[val]] == val + +aes_inv_sbox = array('B', + '52096ad53036a538bf40a39e81f3d7fb' + '7ce339829b2fff87348e4344c4dee9cb' + '547b9432a6c2233dee4c950b42fac34e' + '082ea16628d924b2765ba2496d8bd125' + '72f8f66486689816d4a45ccc5d65b692' + '6c704850fdedb9da5e154657a78d9d84' + '90d8ab008cbcd30af7e45805b8b34506' + 'd02c1e8fca3f0f02c1afbd0301138a6b' + '3a9111414f67dcea97f2cfcef0b4e673' + '96ac7422e7ad3585e2f937e81c75df6e' + '47f11a711d29c5896fb7620eaa18be1b' + 'fc563e4bc6d279209adbc0fe78cd5af4' + '1fdda8338807c731b11210592780ec5f' + '60517fa919b54a0d2de57a9f93c99cef' + 'a0e03b4dae2af5b0c8ebbb3c83539961' + '172b047eba77d626e169146355210c7d'.decode('hex') +) + +# The Rcon table is used in AES's key schedule (key expansion) +# It's a pre-computed table of exponentation of 2 in AES's finite field +# +# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule + +aes_Rcon = array('B', + '8d01020408102040801b366cd8ab4d9a' + '2f5ebc63c697356ad4b37dfaefc59139' + '72e4d3bd61c29f254a943366cc831d3a' + '74e8cb8d01020408102040801b366cd8' + 'ab4d9a2f5ebc63c697356ad4b37dfaef' + 'c5913972e4d3bd61c29f254a943366cc' + '831d3a74e8cb8d01020408102040801b' + '366cd8ab4d9a2f5ebc63c697356ad4b3' + '7dfaefc5913972e4d3bd61c29f254a94' + '3366cc831d3a74e8cb8d010204081020' + '40801b366cd8ab4d9a2f5ebc63c69735' + '6ad4b37dfaefc5913972e4d3bd61c29f' + '254a943366cc831d3a74e8cb8d010204' + '08102040801b366cd8ab4d9a2f5ebc63' + 'c697356ad4b37dfaefc5913972e4d3bd' + '61c29f254a943366cc831d3a74e8cb'.decode('hex') +) From fijal at codespeak.net Tue Sep 21 21:19:32 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Tue, 21 Sep 2010 21:19:32 +0200 (CEST) Subject: [pypy-svn] r77253 - pypy/trunk/pypy/translator/c/gcc Message-ID: <20100921191932.37385282C02@codespeak.net> Author: fijal Date: Tue Sep 21 21:19:27 2010 New Revision: 77253 Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Log: I think xchgq is the same, just for 64bit Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Tue Sep 21 21:19:27 2010 @@ -856,6 +856,7 @@ visit_and = FunctionGcRootTracker._visit_and visit_xchgl = FunctionGcRootTracker._visit_xchg + visit_xchgq = FunctionGcRootTracker._visit_xchg # used in "xor reg, reg" to create a NULL GC ptr visit_xorl = FunctionGcRootTracker.binary_insn From afa at codespeak.net Tue Sep 21 21:39:48 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 21 Sep 2010 21:39:48 +0200 (CEST) Subject: [pypy-svn] r77254 - 
pypy/branch/fast-forward/pypy/module/_multiprocessing Message-ID: <20100921193948.E1A81282BFB@codespeak.net> Author: afa Date: Tue Sep 21 21:39:47 2010 New Revision: 77254 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Log: Fix test on 64bit platform Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Tue Sep 21 21:39:47 2010 @@ -2,7 +2,6 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.rpython.lltypesystem import rffi, lltype -from pypy.rlib.rarithmetic import r_uint import sys, os READABLE = 1 @@ -206,7 +205,7 @@ # "header" and the "body" of the message and send them at once. message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: - rffi.cast(rffi.UINTP, message)[0] = r_uint(size) # XXX htonl! + rffi.cast(rffi.UINTP, message)[0] = rffi.r_uint(size) # XXX htonl! i = size - 1 while i >= 0: message[4 + i] = buffer[offset + i] From hakanardo at codespeak.net Tue Sep 21 22:22:35 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Tue, 21 Sep 2010 22:22:35 +0200 (CEST) Subject: [pypy-svn] r77255 - in pypy/branch/jit-loop-invaraints/pypy: jit/metainterp/optimizeopt jit/metainterp/test module/pypyjit/test Message-ID: <20100921202235.71800282BFB@codespeak.net> Author: hakanardo Date: Tue Sep 21 22:22:33 2010 New Revision: 77255 Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py Log: Moving most guards and *_ovf too. 
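A minimal sketch of the transformation this line of work aims at, written in
plain Python rather than trace operations (the function names are made up for
illustration): a pure computation whose arguments do not change between
iterations is emitted once into a preamble, and only its result is carried
around the loop.

    def loop_with_invariant(a, n):
        sa = 0
        while n > 0:
            sa += a * a            # a*a recomputed on every iteration
            n -= 1
        return sa

    def hoisted_equivalent(a, n):
        t = a * a                  # "preamble": computed once, since a never changes
        sa = 0
        while n > 0:
            sa += t                # the loop body only carries the result around
            n -= 1
        return sa

    assert loop_with_invariant(6, 7) == hoisted_equivalent(6, 7) == 252

Guards and the *_ovf operations mentioned in the log get the same treatment once
all of their arguments are known to be invariant.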
Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Tue Sep 21 22:22:33 2010 @@ -42,24 +42,39 @@ jmp.descr = loop.token preamble.operations.append(jmp) - elif op.is_always_pure(): - for a in op.args: - if self.get_constant_box(a) is None: - if a not in self.optimizer.values: - break - v = self.getvalue(a) - if not v.invariant: - break - else: - print "P: ", op, op.opnum - op.invariant = True - self.emit_operation(op) - if self.get_constant_box(op.result) is None: - v = self.getvalue(op.result) - v.invariant = True - box = v.force_box() - if box not in self.invariant_boxes: - self.invariant_boxes.append(box) + elif (op.is_always_pure() or op.is_foldable_guard() or + op.is_ovf()): + if self.has_invariant_args(op): + self.emit_invariant(op) + return + + elif op.is_guard_overflow(): + prev_op = self.optimizer.loop.operations[self.optimizer.i - 1] + v = self.getvalue(prev_op.result) + if v.invariant: + self.emit_invariant(op) return self.emit_operation(op) + + def emit_invariant(self, op): + print "P: ", op, op.opnum + op.invariant = True + self.emit_operation(op) + if self.get_constant_box(op.result) is None: + v = self.getvalue(op.result) + v.invariant = True + box = v.force_box() + if box and box not in self.invariant_boxes: + self.invariant_boxes.append(box) + + def has_invariant_args(self, op): + for a in op.args: + if self.get_constant_box(a) is None: + if a not in self.optimizer.values: + return False + v = self.getvalue(a) + if not v.invariant: + return False + return True + Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 21 22:22:33 2010 @@ -3870,6 +3870,22 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_loop_invariant_ovf(self): + ops = """ + [i0, i1] + i2 = int_add_ovf(i0, i0) + guard_no_overflow() [] + i3 = int_add_ovf(i2, i1) + guard_no_overflow() [] + jump(i0, i3) + """ + expected = """ + [i0, i1, i2] + i3 = int_add_ovf(i2, i1) + guard_no_overflow() [] + jump(i0, i3, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) Modified: pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/module/pypyjit/test/test_pypy_c.py Tue Sep 21 22:22:33 2010 @@ -1122,7 +1122,7 @@ sa += a*a i += 1 return sa - ''', 34, ([2], 8000)) + ''', 35, ([2], 8000)) def test_invariant_mul_bridge(self): self.run_source(''' @@ -1135,7 +1135,7 @@ a = 7 i += 1 return sa - ''', 61, ([2], 105910)) + ''', 62, ([2], 105910)) From afa at codespeak.net Wed Sep 22 01:38:42 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 22 Sep 2010 01:38:42 +0200 (CEST) Subject: [pypy-svn] r77256 - pypy/branch/fast-forward/pypy/module/_multiprocessing Message-ID: <20100921233842.99A02282C02@codespeak.net> Author: afa Date: Wed Sep 22 01:38:40 2010 New Revision: 77256 Modified: 
pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Log: Fix translation Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Wed Sep 22 01:38:40 2010 @@ -1,7 +1,10 @@ from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import ( + OperationError, wrap_oserror, operationerrfmt) from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rlib.rarithmetic import intmask import sys, os READABLE = 1 @@ -10,6 +13,18 @@ PY_SSIZE_T_MAX = sys.maxint PY_SSIZE_T_MIN = -sys.maxint - 1 +MP_END_OF_FILE = -1002 +MP_EARLY_END_OF_FILE = -1003 +MP_BAD_MESSAGE_LENGTH = -1004 + +def mp_error(space, res): + return OperationError(space.w_ValueError, + space.wrap("MULTIPROCESSING")) + +def BufferTooShort(space): + return OperationError(space.w_ValueError, + space.wrap("BUFFERTOOSHORT")) + class W_BaseConnection(Wrappable): BUFFER_SIZE = 1024 @@ -22,15 +37,25 @@ lltype.free(self.buffer, flavor='raw') self.do_close() + # Abstract methods + def do_close(self): + raise NotImplementedError + def is_valid(self): + return False + def do_send_string(self, space, buffer, offset, size): + raise NotImplementedError + def do_recv_string(self, space, maxlength): + raise NotImplementedError + def close(self): self.do_close() def closed_get(space, self): - return space.w_bool(not self.is_valid()) + return space.newbool(not self.is_valid()) def readable_get(space, self): - return space.w_bool(self.flags & READABLE) + return space.newbool(bool(self.flags & READABLE)) def writable_get(space, self): - return space.w_bool(self.flags & WRITABLE) + return space.newbool(bool(self.flags & WRITABLE)) def _check_readable(self, space): if not self.flags & READABLE: @@ -71,13 +96,14 @@ space.wrap("maxlength < 0")) res, newbuf = self.do_recv_string(space, maxlength) + res = intmask(res) # XXX why? try: if res < 0: if res == MP_BAD_MESSAGE_LENGTH: self.flags &= ~READABLE if self.flags == 0: self.close() - raise mp_error(res) + raise mp_error(space, res) if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -93,16 +119,17 @@ length = rwbuffer.getlength() res, newbuf = self.do_recv_string(space, length - offset) + res = intmask(res) # XXX why? try: if res < 0: if res == MP_BAD_MESSAGE_LENGTH: self.flags &= ~READABLE if self.flags == 0: self.close() - raise mp_error(res) + raise mp_error(space, res) if res > length - offset: - raise OperationError(BufferTooShort) + raise BufferTooShort(space) if newbuf: rwbuffer.setslice(offset, rffi.charpsize2str(newbuf, res)) else: @@ -133,13 +160,14 @@ self._check_readable(space) res, newbuf = self.do_recv_string(space, PY_SSIZE_T_MAX) + res = intmask(res) # XXX why? 
try: if res < 0: if res == MP_BAD_MESSAGE_LENGTH: self.flags &= ~READABLE if self.flags == 0: self.close() - raise mp_error(res) + raise mp_error(space, res) if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) else: @@ -156,9 +184,7 @@ return w_unpickled - - -base_typedef = TypeDef( +W_BaseConnection.typedef = TypeDef( 'BaseConnection', closed = GetSetProperty(W_BaseConnection.closed_get), readable = GetSetProperty(W_BaseConnection.readable_get), @@ -181,7 +207,7 @@ self.fd = fd @unwrap_spec(ObjSpace, W_Root, int, bool, bool) - def descr_new(space, w_subtype, fd, readable=True, writable=True): + def descr_new_file(space, w_subtype, fd, readable=True, writable=True): flags = (readable and READABLE) | (writable and WRITABLE) self = space.allocate_instance(W_FileConnection, w_subtype) @@ -217,17 +243,17 @@ def do_recv_string(self, space, maxlength): length_ptr = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 1, flavor='raw') - self._recvall(rffi.cast(rffi.CCHARP, length_ptr), 4) - length = length_ptr[0] + self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) + length = intmask(length_ptr[0]) if length > maxlength: - return MP_BAD_MESSAGE_LENGTH + return MP_BAD_MESSAGE_LENGTH, lltype.nullptr(rffi.CCHARP.TO) if length <= self.BUFFER_SIZE: - self._recvall(self.buffer, length) - return length, None + self._recvall(space, self.buffer, length) + return length, lltype.nullptr(rffi.CCHARP.TO) else: newbuf = lltype.malloc(rffi.CCHARP.TO, length, flavor='raw') - self._recvall(newbuf, length) + self._recvall(space, newbuf, length) return length, newbuf def _sendall(self, space, message, size): @@ -241,7 +267,8 @@ size -= count message = rffi.ptradd(message, count) - def _recvall(self, buffer, length): + def _recvall(self, space, buffer, length): + length = intmask(length) remaining = length while remaining > 0: try: @@ -251,9 +278,9 @@ count = len(data) if count == 0: if remaining == length: - return MP_END_OF_FILE + raise mp_error(space, MP_END_OF_FILE) else: - return MP_EARLY_END_OF_FILE + raise mp_error(space, MP_EARLY_END_OF_FILE) # XXX inefficient for i in range(count): buffer[i] = data[i] @@ -261,8 +288,8 @@ buffer = rffi.ptradd(buffer, count) W_FileConnection.typedef = TypeDef( - 'Connection', base_typedef, - __new__ = interp2app(W_FileConnection.descr_new.im_func), + 'Connection', W_BaseConnection.typedef, + __new__ = interp2app(W_FileConnection.descr_new_file.im_func), fileno = interp2app(W_FileConnection.fileno), ) @@ -275,7 +302,7 @@ self.handle = handle @unwrap_spec(ObjSpace, W_Root, W_Root, bool, bool) - def descr_new(space, w_subtype, w_handle, readable=True, writable=True): + def descr_new_pipe(space, w_subtype, w_handle, readable=True, writable=True): from pypy.module._multiprocessing.interp_win32 import handle_w handle = handle_w(space, w_handle) flags = (readable and READABLE) | (writable and WRITABLE) @@ -319,7 +346,7 @@ if (result == 0 and rwin32.GetLastError() == ERROR_NO_SYSTEM_RESOURCES): - raise operrfmt( + raise operationerrfmt( space.w_ValueError, "Cannot send %ld bytes over connection", size) finally: @@ -340,7 +367,7 @@ self.buffer, min(self.BUFFER_SIZE, maxlength), read_ptr, rffi.NULL) if result: - return read_ptr[0], None + return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -355,7 +382,7 @@ length = read_ptr[0] + left_ptr[0] if length > maxlength: - return MP_BAD_MESSAGE_LENGTH + return MP_BAD_MESSAGE_LENGTH, lltype.nullptr(rffi.CCHARP.TO) newbuf = lltype.malloc(rffi.CCHARP.TO, length + 1, 
flavor='raw') raw_memcopy(self.buffer, newbuf, read_ptr[0]) @@ -368,12 +395,13 @@ return length, newbuf else: rffi.free_charp(newbuf) - return MP_STANDARD_ERROR, None + return MP_STANDARD_ERROR, lltype.nullptr(rffi.CCHARP.TO) finally: lltype.free(read_ptr, flavor='raw') -W_PipeConnection.typedef = TypeDef( - 'PipeConnection', base_typedef, - __new__ = interp2app(W_PipeConnection.descr_new.im_func), - fileno = interp2app(W_PipeConnection.fileno), -) +if sys.platform == 'win32': + W_PipeConnection.typedef = TypeDef( + 'PipeConnection', W_BaseConnection.typedef, + __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), + fileno = interp2app(W_PipeConnection.fileno), + ) From fijal at codespeak.net Wed Sep 22 09:56:09 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 09:56:09 +0200 (CEST) Subject: [pypy-svn] r77257 - pypy/trunk/pypy/translator/c Message-ID: <20100922075609.D03E5282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 09:56:07 2010 New Revision: 77257 Modified: pypy/trunk/pypy/translator/c/genc.py Log: (jell) encode used python in makefile. this way we can have different python versions and they'll be used for trackgcroot (example - 32 and 64 bit) Modified: pypy/trunk/pypy/translator/c/genc.py ============================================================================== --- pypy/trunk/pypy/translator/c/genc.py (original) +++ pypy/trunk/pypy/translator/c/genc.py Wed Sep 22 09:56:07 2010 @@ -592,7 +592,7 @@ if sys.platform == 'win32': python = sys.executable.replace('\\', '/') + ' ' else: - python = '' + python = sys.executable + ' ' if self.translator.platform.name == 'msvc': lblofiles = [] From fijal at codespeak.net Wed Sep 22 10:26:03 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 10:26:03 +0200 (CEST) Subject: [pypy-svn] r77258 - pypy/benchmarks Message-ID: <20100922082603.E2C46282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 10:26:02 2010 New Revision: 77258 Modified: pypy/benchmarks/benchmarks.py Log: Enable crypto_pyaes benchmark on nightly run Modified: pypy/benchmarks/benchmarks.py ============================================================================== --- pypy/benchmarks/benchmarks.py (original) +++ pypy/benchmarks/benchmarks.py Wed Sep 22 10:26:02 2010 @@ -44,7 +44,8 @@ } for name in ['float', 'nbody_modified', 'meteor-contest', 'fannkuch', - 'spectral-norm', 'chaos', 'telco', 'go', 'pyflate-fast', 'raytrace-simple']: + 'spectral-norm', 'chaos', 'telco', 'go', 'pyflate-fast', + 'raytrace-simple', 'crypto_pyaes']: _register_new_bm(name, name, globals(), **opts.get(name, {})) for name in ['names', 'iteration', 'tcp', 'pb']:#, 'accepts', 'web']: if name == 'web': From antocuni at codespeak.net Wed Sep 22 11:24:37 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 22 Sep 2010 11:24:37 +0200 (CEST) Subject: [pypy-svn] r77260 - pypy/branch/resoperation-refactoring/pypy/jit/metainterp Message-ID: <20100922092437.C2379282BFB@codespeak.net> Author: antocuni Date: Wed Sep 22 11:24:35 2010 New Revision: 77260 Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Log: revert r77212, as it did not give any benefit Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py ============================================================================== --- 
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Wed Sep 22 11:24:35 2010 @@ -8,7 +8,7 @@ from pypy.tool.uid import uid from pypy.conftest import option -from pypy.jit.metainterp.resoperation import ResOperation, ResOperation_fast, rop +from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker # ____________________________________________________________ @@ -834,12 +834,6 @@ self.operations.append(op) return op - def record_fast(self, opnum, resbox, descr, *args): - #op = ResOperation(opnum, argboxes, resbox, descr) - op = ResOperation_fast(opnum, resbox, descr, *args) - self.operations.append(op) - return op - def substitute_operation(self, position, opnum, argboxes, descr=None): resbox = self.operations[position].result op = ResOperation(opnum, argboxes, resbox, descr) Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Wed Sep 22 11:24:35 2010 @@ -1444,8 +1444,8 @@ if rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST: return self._record_helper_pure(opnum, resbox, descr, *argboxes) else: - return self._record_helper_nonpure(opnum, resbox, descr, - *argboxes) + return self._record_helper_nonpure_varargs(opnum, resbox, descr, + list(argboxes)) @specialize.arg(1) def execute_and_record_varargs(self, opnum, argboxes, descr=None): @@ -1470,7 +1470,7 @@ return resbox else: resbox = resbox.nonconstbox() # ensure it is a Box - return self._record_helper_nonpure(opnum, resbox, descr, *argboxes) + return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) @@ -1490,15 +1490,6 @@ self.attach_debug_info(op) return resbox - def _record_helper_nonpure(self, opnum, resbox, descr, *argboxes): - assert resbox is None or isinstance(resbox, Box) - # record the operation - profiler = self.staticdata.profiler - profiler.count_ops(opnum, RECORDED_OPS) - op = self.history.record_fast(opnum, resbox, descr, *argboxes) - self.attach_debug_info(op) - return resbox - def attach_debug_info(self, op): if (not we_are_translated() and op is not None and getattr(self, 'framestack', None)): Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Wed Sep 22 11:24:35 2010 @@ -1,7 +1,6 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized - def ResOperation(opnum, args, result, descr=None): cls = opclasses[opnum] op = cls(result) @@ -12,17 +11,6 @@ return op -def ResOperation_fast(opnum, result, descr, *args): - cls = opclasses[opnum] - op = cls(result) - op.initarglist_fast(*args) - if descr is not None: - assert isinstance(op, ResOpWithDescr) - op.setdescr(descr) - return op - - - class AbstractResOp(object): """The central ResOperation class, representing one operation.""" @@ -46,10 +34,6 @@ "This is supposed to be called only just after the ResOp has been 
created" raise NotImplementedError - def initarglist_fast(self, *args): - "This is supposed to be called only just after the ResOp has been created" - raise NotImplementedError - def getarglist(self): raise NotImplementedError @@ -230,9 +214,6 @@ def initarglist(self, args): assert len(args) == 0 - def initarglist_fast(self, *args): - assert len(args) == 0 - def getarglist(self): return [] @@ -254,10 +235,6 @@ assert len(args) == 1 self._arg0, = args - def initarglist_fast(self, *args): - assert len(args) == 1 - self._arg0, = args - def getarglist(self): return [self._arg0] @@ -286,10 +263,6 @@ assert len(args) == 2 self._arg0, self._arg1 = args - def initarglist_fast(self, *args): - assert len(args) == 2 - self._arg0, self._arg1 = args - def getarglist(self): return [self._arg0, self._arg1, self._arg2] @@ -326,10 +299,6 @@ assert len(args) == 3 self._arg0, self._arg1, self._arg2 = args - def initarglist_fast(self, *args): - assert len(args) == 3 - self._arg0, self._arg1, self._arg2 = args - def getarglist(self): return [self._arg0, self._arg1, self._arg2] @@ -363,9 +332,6 @@ def initarglist(self, args): self._args = args - def initarglist_fast(self, *args): - self._args = list(args) - def getarglist(self): return self._args @@ -489,7 +455,7 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', 'NEWUNICODE/1', - #'RUNTIMENEW/1', # ootype operation + #'RUNTIMENEW/1', # ootype operation 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend From arigo at codespeak.net Wed Sep 22 11:50:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 11:50:19 +0200 (CEST) Subject: [pypy-svn] r77261 - pypy/branch/jit-str Message-ID: <20100922095019.46CA6282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 11:50:17 2010 New Revision: 77261 Added: pypy/branch/jit-str/ - copied from r77260, pypy/trunk/ Log: A branch in which to improve jitting of strings, starting from rstr. From arigo at codespeak.net Wed Sep 22 11:52:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 11:52:53 +0200 (CEST) Subject: [pypy-svn] r77262 - in pypy/branch/jit-str/pypy: jit/codewriter jit/codewriter/test jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test rpython/lltypesystem Message-ID: <20100922095253.15BF1282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 11:52:51 2010 New Revision: 77262 Modified: pypy/branch/jit-str/pypy/jit/codewriter/support.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/jit/metainterp/optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeutil.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Log: In-progress. Add "virtual one-character strings". 
Modified: pypy/branch/jit-str/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/support.py Wed Sep 22 11:52:51 2010 @@ -275,10 +275,12 @@ # ---------- strings and unicode ---------- - _ll_5_string_copy_contents = ll_rstr.copy_string_contents - _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode - _ll_5_unicode_copy_contents = ll_rstr.copy_unicode_contents + + _ll_2_stroruni_concat = ll_rstr.LLHelpers.ll_strconcat + _ll_2_stroruni_slice_startonly = ll_rstr.LLHelpers.ll_stringslice_startonly + _ll_3_stroruni_slice_startstop = ll_rstr.LLHelpers.ll_stringslice_startstop + _ll_1_stroruni_slice_minusone = ll_rstr.LLHelpers.ll_stringslice_minusone # ---------- malloc with del ---------- Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Wed Sep 22 11:52:51 2010 @@ -1,3 +1,4 @@ +import py import random from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant @@ -7,6 +8,9 @@ from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker +def const(x): + return Constant(x, lltype.typeOf(x)) + class FakeRTyper: class type_system: name = 'lltypesystem' instance_reprs = {} @@ -67,6 +71,14 @@ def calldescr_canraise(self, calldescr): return False +class FakeBuiltinCallControl: + def guess_call_kind(self, op): + return 'builtin' + def getcalldescr(self, op): + return 'calldescr' + def calldescr_canraise(self, calldescr): + return False + def test_optimize_goto_if_not(): v1 = Variable() @@ -107,7 +119,7 @@ assert block.operations == [] assert block.exitswitch == ('int_gt', v1, v2) assert block.exits == exits - assert exits[1].args == [Constant(True, lltype.Bool)] + assert exits[1].args == [const(True)] def test_optimize_goto_if_not__unknownop(): v3 = Variable(); v3.concretetype = lltype.Bool @@ -159,8 +171,8 @@ 'float_gt': ('float_gt', 'float_lt'), } v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: for name1, name2 in ops.items(): op = SpaceOperation(name1, [v1, v2], v3) op1 = Transformer(FakeCPU()).rewrite_operation(op) @@ -177,8 +189,8 @@ def test_symmetric_int_add_ovf(): v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) op0, op1 = oplist @@ -218,7 +230,7 @@ def get_direct_call_op(argtypes, restype): FUNC = lltype.FuncType(argtypes, restype) fnptr = lltype.functionptr(FUNC, "g") # no graph - c_fnptr = Constant(fnptr, concretetype=lltype.typeOf(fnptr)) + c_fnptr = const(fnptr) vars = [varoftype(TYPE) for TYPE in argtypes] v_result = varoftype(restype) op = SpaceOperation('direct_call', [c_fnptr] + vars, v_result) @@ -465,7 +477,7 @@ v1 = 
varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) v3 = varoftype(lltype.Bool) - c0 = Constant(0, lltype.Signed) + c0 = const(0) # for opname, reducedname in [('int_eq', 'int_is_zero'), ('int_ne', 'int_is_true')]: @@ -488,7 +500,7 @@ v1 = varoftype(rclass.OBJECTPTR) v2 = varoftype(rclass.OBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.OBJECT), rclass.OBJECTPTR) + c0 = const(lltype.nullptr(rclass.OBJECT)) # for opname, reducedname in [('ptr_eq', 'ptr_iszero'), ('ptr_ne', 'ptr_nonzero')]: @@ -511,7 +523,7 @@ v1 = varoftype(rclass.NONGCOBJECTPTR) v2 = varoftype(rclass.NONGCOBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.NONGCOBJECT), rclass.NONGCOBJECTPTR) + c0 = const(lltype.nullptr(rclass.NONGCOBJECT)) # for opname, reducedname in [('ptr_eq', 'int_is_zero'), ('ptr_ne', 'int_is_true')]: @@ -656,3 +668,83 @@ oplist = tr.rewrite_operation(op) assert oplist[0].opname == 'inline_call_ir_i' assert oplist[0].args[0] == 'somejitcode' + +def test_str_newstr(): + c_STR = Constant(rstr.STR, lltype.Void) + c_flavor = Constant({'flavor': 'gc'}, lltype.Void) + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Ptr(rstr.STR)) + op = SpaceOperation('malloc_varsize', [c_STR, c_flavor, v1], v2) + op1 = Transformer().rewrite_operation(op) + assert op1.opname == 'newstr' + assert op1.args == [v1] + assert op1.result == v2 + +def test_str_concat(): + py.test.xfail('later') + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert list(op1.args[2]) == [v1, v2] + assert op1.result == v3 + +def test_str_stringslice_startonly(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + INT = lltype.Signed + FUNC = lltype.FuncType([PSTR, INT], PSTR) + func = lltype.functionptr(FUNC, 'll_stringslice_startonly', + _callable=rstr.LLHelpers.ll_stringslice_startonly) + v1 = varoftype(PSTR) + v2 = varoftype(INT) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert list(op1.args[2]) == [v2] + assert list(op1.args[3]) == [v1] + assert op1.result == v3 + +def test_str_stringslice_startstop(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + INT = lltype.Signed + FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) + func = lltype.functionptr(FUNC, 'll_stringslice_startstop', + _callable=rstr.LLHelpers.ll_stringslice_startstop) + v1 = varoftype(PSTR) + v2 = varoftype(INT) + v3 = varoftype(INT) + v4 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert list(op1.args[2]) == [v2, v3] + assert list(op1.args[3]) == [v1] + assert op1.result == v4 + +def test_str_stringslice_minusone(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + FUNC = lltype.FuncType([PSTR], 
PSTR) + func = lltype.functionptr(FUNC, 'll_stringslice_minusone', + _callable=rstr.LLHelpers.ll_stringslice_minusone) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1], v2) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert list(op1.args[2]) == [v1] + assert op1.result == v2 Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizefindnode.py Wed Sep 22 11:52:51 2010 @@ -172,7 +172,7 @@ find_nodes_PTR_EQ = find_nodes_no_escape find_nodes_PTR_NE = find_nodes_no_escape - find_nodes_INSTANCEOF = find_nodes_no_escape + ##find_nodes_INSTANCEOF = find_nodes_no_escape find_nodes_GUARD_NONNULL = find_nodes_no_escape find_nodes_GUARD_ISNULL = find_nodes_no_escape Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 22 11:52:51 2010 @@ -126,6 +126,12 @@ def setitem(self, index, value): raise NotImplementedError + def getchar(self): + raise NotImplementedError + + def setchar(self, charvalue): + raise NotImplementedError + class ConstantValue(OptValue): def __init__(self, box): self.make_constant(box) Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py Wed Sep 22 11:52:51 2010 @@ -307,17 +307,17 @@ def optimize_PTR_EQ(self, op): self._optimize_oois_ooisnot(op, False) - def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) - realclassbox = value.get_constant_class(self.optimizer.cpu) - if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) - result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, - realclassbox, - checkclassbox) - self.make_constant_int(op.result, result) - return - self.emit_operation(op) +## def optimize_INSTANCEOF(self, op): +## value = self.getvalue(op.args[0]) +## realclassbox = value.get_constant_class(self.optimizer.cpu) +## if realclassbox is not None: +## checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) +## result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, +## realclassbox, +## checkclassbox) +## self.make_constant_int(op.result, result) +## return +## self.emit_operation(op) optimize_ops = _findall(OptRewrite, 'optimize_') Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 11:52:51 2010 @@ -188,12 +188,43 @@ itemboxes.append(itemvalue.get_key_box()) modifier.register_virtual_fields(self.keybox, itemboxes) for itemvalue in self._items: - if itemvalue is not self.constvalue: - itemvalue.get_args_for_fail(modifier) + itemvalue.get_args_for_fail(modifier) def _make_virtual(self, 
modifier): return modifier.make_varray(self.arraydescr) +class VStringLength1Value(AbstractVirtualValue): + + def __init__(self, optimizer, keybox, source_op=None): + AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) + self._char = CVAL_ZERO + + def getchar(self): + return self._char + + def setchar(self, charvalue): + assert isinstance(charvalue, OptValue) + self._char = charvalue + + def _really_force(self): + assert self.source_op is not None + newoperations = self.optimizer.newoperations + newoperations.append(self.source_op) + self.box = box = self.source_op.result + charbox = self._char.force_box() + op = ResOperation(rop.STRSETITEM, + [box, ConstInt(0), charbox], None) + newoperations.append(op) + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + charboxes = [self._char.get_key_box()] + modifier.register_virtual_fields(self.keybox, charboxes) + self._char.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_vstring() + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -282,6 +313,11 @@ self.make_equal_to(box, vvalue) return vvalue + def make_vstring_length1(self, box, source_op=None): + vvalue = VStringLength1Value(self.optimizer, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] @@ -358,8 +394,8 @@ def optimize_SETFIELD_GC(self, op): value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) if value.is_virtual(): + fieldvalue = self.getvalue(op.args[1]) value.setfield(op.descr, fieldvalue) else: value.ensure_nonnull() @@ -444,6 +480,44 @@ self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, descr)) + def optimize_NEWSTR(self, op): + length_box = self.get_constant_box(op.args[0]) + if length_box and length_box.getint() == 1: # NEWSTR(1) + # if the original 'op' did not have a ConstInt as argument, + # build a new one with the ConstInt argument + if not isinstance(op.args[0], ConstInt): + op = ResOperation(rop.NEWSTR, [CONST_1], op.result) + self.make_vstring_length1(op.result, op) + else: + self.emit_operation(op) + + def optimize_STRSETITEM(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + charvalue = self.getvalue(op.args[2]) + value.setchar(charvalue) + else: + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_STRGETITEM(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + charvalue = value.getchar() + assert charvalue is not None + self.make_equal_to(op.result, charvalue) + else: + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_STRLEN(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual(): + self.make_constant_int(op.result, 1) + else: + value.ensure_nonnull() + self.emit_operation(op) + def propagate_forward(self, op): opnum = op.opnum for value, func in optimize_ops: Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeutil.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeutil.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeutil.py Wed Sep 22 11:52:51 2010 @@ -14,6 +14,11 @@ def _findall(Class, name_prefix): result = [] + for name in dir(Class): + if name.startswith(name_prefix): + opname = name[len(name_prefix):] + if opname.isupper(): + assert 
hasattr(resoperation.rop, opname) for value, name in resoperation.opname.items(): if hasattr(Class, name_prefix + name): result.append((value, getattr(Class, name_prefix + name))) Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Wed Sep 22 11:52:51 2010 @@ -253,6 +253,9 @@ def make_varray(self, arraydescr): return VArrayInfo(arraydescr) + def make_vstring(self): + return VStringInfo() + def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) self.liveboxes[virtualbox] = tagged @@ -486,6 +489,27 @@ for i in self.fieldnums: debug_print("\t\t", str(untag(i))) +class VStringInfo(AbstractVirtualInfo): + def __init__(self): + pass + #self.fieldnums = ... + + @specialize.argtype(1) + def allocate(self, decoder): + length = len(self.fieldnums) + return decoder.allocate_string(length) + + @specialize.argtype(1) + def setfields(self, decoder, string): + length = len(self.fieldnums) + for i in range(length): + decoder.strsetitem(string, i, self.fieldnums[i]) + + def debug_prints(self): + debug_print("\tvstringinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + # ____________________________________________________________ class AbstractResumeDataReader(object): @@ -622,6 +646,9 @@ return self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, ConstInt(length)) + def allocate_string(self, length): + return self.metainterp.execute_and_record(rop.NEWSTR, ConstInt(length)) + def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): kind = REF @@ -839,6 +866,9 @@ def allocate_array(self, arraydescr, length): return self.cpu.bh_new_array(arraydescr, length) + def allocate_string(self, length): + return self.cpu.bh_newstr(length) + def setfield(self, descr, struct, fieldnum): if descr.is_pointer_field(): newvalue = self.decode_ref(fieldnum) Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Wed Sep 22 11:52:51 2010 @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin @@ -72,6 +72,104 @@ res = self.meta_interp(f, [6, 10]) assert res == 6 + def test_char2string_pure(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n']) + @dont_look_inside + def escape(x): + pass + def f(n): + while n > 0: + jitdriver.can_enter_jit(n=n) + jitdriver.jit_merge_point(n=n) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + if n > 100: + escape(s) + n -= 1 + return 42 + self.meta_interp(f, [6]) + self.check_loops(newstr=0, strsetitem=0, strlen=0, + newunicode=0, unicodesetitem=0, unicodelen=0) + + def test_char2string_escape(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n', 'total']) + @dont_look_inside + def escape(x): + return ord(x[0]) + def f(n): + total = 0 + while n > 0: + jitdriver.can_enter_jit(n=n, total=total) + 
jitdriver.jit_merge_point(n=n, total=total) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + total += escape(s) + n -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + + def test_char2string2char(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['m', 'total']) + def f(m): + total = 0 + while m > 0: + jitdriver.can_enter_jit(m=m, total=total) + jitdriver.jit_merge_point(m=m, total=total) + string = dochr(m) + if m > 100: + string += string # forces to be a string + # read back the character + c = string[0] + total += ord(c) + m -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + newunicode=0, unicodegetitem=0, unicodesetitem=0, + unicodelen=0) + + def test_slice_startonly(self): + if 1: # xxx unicode + jitdriver = JitDriver(greens = [], reds = ['m', 'total']) + def f(m): + total = 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, total=total) + jitdriver.jit_merge_point(m=m, total=total) + string = 's0dgkwn349tXOGIEQR!'[m:] + c = string[2*m] + total += ord(c) + m -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == sum(map(ord, 'sgn9OE!')) + self.check_loops(call=0, call_pure=0, + newstr=0, strgetitem=1, strsetitem=0, strlen=0) + + def test_strconcat_pure(self): + for dochr in [chr, ]: #unichr]: + @dont_look_inside + def escape(x): + pass + def f(n, m): + s = dochr(n) + dochr(m) + if not we_are_jitted(): + escape(s) + return 42 + self.interp_operations(f, [65, 66]) + py.test.xfail() + self.check_operations_history(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0, + call=0, call_pure=0) + + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" CALL_PURE = "oosend_pure" Modified: pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py Wed Sep 22 11:52:51 2010 @@ -83,6 +83,9 @@ return history.ConstFloat(value) else: return history.BoxFloat(value) + elif isinstance(value, (str, unicode)): + assert len(value) == 1 # must be a character + value = ord(value) else: value = intmask(value) if in_const_box: Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Wed Sep 22 11:52:51 2010 @@ -65,8 +65,8 @@ dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) copy_string_contents._always_inline_ = True - copy_string_contents.oopspec = ( - '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) + #copy_string_contents.oopspec = ( + # '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') @@ -326,6 +326,7 @@ s1.copy_contents(s1, newstr, 0, 0, len1) s1.copy_contents(s2, newstr, 0, len1, len2) return newstr + #ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' @purefunction def ll_strip(s, ch, left, right): @@ -693,7 +694,6 @@ i += 1 return result - @purefunction def ll_stringslice_startonly(s1, start): len1 = len(s1.chars) newstr = s1.malloc(len1 - start) @@ -702,8 +702,8 @@ 
assert start >= 0 s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + ll_stringslice_startonly.oopspec = 'stroruni.slice_startonly(s1, start)' - @purefunction def ll_stringslice_startstop(s1, start, stop): if stop >= len(s1.chars): if start == 0: @@ -715,14 +715,16 @@ assert lgt >= 0 s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + ll_stringslice_startstop.oopspec = ('stroruni.slice_startstop(s1, ' + 'start, stop)') - @purefunction def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 newstr = s1.malloc(newlen) assert newlen >= 0 s1.copy_contents(s1, newstr, 0, 0, newlen) return newstr + ll_stringslice_minusone.oopspec = 'stroruni.slice_minusone(s1)' def ll_split_chr(LIST, s, c): chars = s.chars From afa at codespeak.net Wed Sep 22 12:58:38 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 22 Sep 2010 12:58:38 +0200 (CEST) Subject: [pypy-svn] r77264 - pypy/branch/fast-forward/pypy/module/_multiprocessing Message-ID: <20100922105838.B19A1282C03@codespeak.net> Author: afa Date: Wed Sep 22 12:58:36 2010 New Revision: 77264 Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Log: Fix translation of _multiprocessing module on Windows Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_connection.py Wed Sep 22 12:58:36 2010 @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import ( OperationError, wrap_oserror, operationerrfmt) -from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rlib.rarithmetic import intmask import sys, os @@ -348,15 +348,16 @@ rwin32.GetLastError() == ERROR_NO_SYSTEM_RESOURCES): raise operationerrfmt( space.w_ValueError, - "Cannot send %ld bytes over connection", size) + "Cannot send %d bytes over connection", size) finally: rffi.free_charp(charp) lltype.free(written_ptr, flavor='raw') def do_recv_string(self, space, maxlength): from pypy.module._multiprocessing.interp_win32 import ( - _ReadFile) + _ReadFile, _PeekNamedPipe, ERROR_BROKEN_PIPE, ERROR_MORE_DATA) from pypy.rlib import rwin32 + from pypy.interpreter.error import wrap_windowserror read_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1, flavor='raw') @@ -371,31 +372,34 @@ err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: - return MP_END_OF_FILE + return MP_END_OF_FILE, lltype.nullptr(rffi.CCHARP.TO) elif err != ERROR_MORE_DATA: - return MP_STANDARD_ERROR + raise wrap_windowserror(space, WindowsError(err, "_ReadFile")) # More data... 
if not _PeekNamedPipe(self.handle, rffi.NULL, 0, - rffi.NULL, rffi.NULL, left_ptr): - return MP_STANDARD_ERROR + lltype.nullptr(rwin32.LPDWORD.TO), + lltype.nullptr(rwin32.LPDWORD.TO), + left_ptr): + raise wrap_windowserror(space, rwin32.lastWindowsError()) - length = read_ptr[0] + left_ptr[0] + length = intmask(read_ptr[0] + left_ptr[0]) if length > maxlength: return MP_BAD_MESSAGE_LENGTH, lltype.nullptr(rffi.CCHARP.TO) newbuf = lltype.malloc(rffi.CCHARP.TO, length + 1, flavor='raw') - raw_memcopy(self.buffer, newbuf, read_ptr[0]) + for i in range(read_ptr[0]): + newbuf[i] = self.buffer[i] result = _ReadFile(self.handle, rffi.ptradd(newbuf, read_ptr[0]), left_ptr[0], read_ptr, rffi.NULL) - if result: - assert read_ptr[0] == left_ptr[0] - return length, newbuf - else: + if not result: rffi.free_charp(newbuf) - return MP_STANDARD_ERROR, lltype.nullptr(rffi.CCHARP.TO) + raise wrap_windowserror(space, rwin32.lastWindowsError()) + + assert read_ptr[0] == left_ptr[0] + return length, newbuf finally: lltype.free(read_ptr, flavor='raw') Modified: pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py (original) +++ pypy/branch/fast-forward/pypy/module/_multiprocessing/interp_win32.py Wed Sep 22 12:58:36 2010 @@ -1,6 +1,6 @@ from pypy.interpreter.gateway import ObjSpace, W_Root, unwrap_spec, interp2app from pypy.interpreter.function import StaticMethod -from pypy.interpreter.error import wrap_windowserror +from pypy.interpreter.error import wrap_windowserror, OperationError from pypy.rlib import rwin32 from pypy.rlib.rarithmetic import r_uint from pypy.rpython.lltypesystem import rffi, lltype @@ -14,7 +14,7 @@ PIPE_UNLIMITED_INSTANCES NMPWAIT_WAIT_FOREVER ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY - ERROR_NO_SYSTEM_RESOURCES + ERROR_NO_SYSTEM_RESOURCES ERROR_BROKEN_PIPE ERROR_MORE_DATA """.split() class CConfig: @@ -51,6 +51,14 @@ rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], rwin32.BOOL) +_PeekNamedPipe = rwin32.winexternal( + 'PeekNamedPipe', [ + rwin32.HANDLE, + rffi.VOIDP, + rwin32.DWORD, + rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], + rwin32.BOOL) + _CreateFile = rwin32.winexternal( 'CreateFileA', [ rwin32.LPCSTR, @@ -81,7 +89,7 @@ raise wrap_windowserror(space, rwin32.lastWindowsError()) def GetLastError(space): - return space.wrap(rwin32.lastWindowsError()) + return space.wrap(rwin32.GetLastError()) @unwrap_spec(ObjSpace, str, r_uint, r_uint, r_uint, r_uint, r_uint, r_uint, W_Root) def CreateNamedPipe(space, name, openmode, pipemode, maxinstances, From antocuni at codespeak.net Wed Sep 22 14:17:19 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 22 Sep 2010 14:17:19 +0200 (CEST) Subject: [pypy-svn] r77265 - in pypy/trunk/pypy: jit/backend/cli jit/backend/llgraph jit/backend/llsupport jit/backend/llsupport/test jit/backend/llvm jit/backend/test jit/backend/x86 jit/backend/x86/test jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test jit/tool module/array/benchmark module/array/test Message-ID: <20100922121719.9521B282BFB@codespeak.net> Author: antocuni Date: Wed Sep 22 14:17:16 2010 New Revision: 77265 Added: pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py - copied unchanged from r77260, pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py Modified: pypy/trunk/pypy/jit/backend/cli/method.py pypy/trunk/pypy/jit/backend/cli/runner.py 
pypy/trunk/pypy/jit/backend/llgraph/runner.py pypy/trunk/pypy/jit/backend/llsupport/gc.py pypy/trunk/pypy/jit/backend/llsupport/regalloc.py pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py pypy/trunk/pypy/jit/backend/llvm/compile.py pypy/trunk/pypy/jit/backend/test/runner_test.py pypy/trunk/pypy/jit/backend/test/test_ll_random.py pypy/trunk/pypy/jit/backend/test/test_random.py pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/regalloc.py pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py pypy/trunk/pypy/jit/backend/x86/test/test_runner.py pypy/trunk/pypy/jit/metainterp/compile.py pypy/trunk/pypy/jit/metainterp/graphpage.py pypy/trunk/pypy/jit/metainterp/history.py pypy/trunk/pypy/jit/metainterp/logger.py pypy/trunk/pypy/jit/metainterp/optimize.py pypy/trunk/pypy/jit/metainterp/optimizefindnode.py pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/trunk/pypy/jit/metainterp/pyjitpl.py pypy/trunk/pypy/jit/metainterp/resoperation.py pypy/trunk/pypy/jit/metainterp/simple_optimize.py pypy/trunk/pypy/jit/metainterp/test/oparser.py pypy/trunk/pypy/jit/metainterp/test/test_basic.py pypy/trunk/pypy/jit/metainterp/test/test_logger.py pypy/trunk/pypy/jit/metainterp/test/test_loop.py pypy/trunk/pypy/jit/metainterp/test/test_oparser.py pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py pypy/trunk/pypy/jit/metainterp/test/test_recursive.py pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py pypy/trunk/pypy/jit/tool/showstats.py pypy/trunk/pypy/module/array/benchmark/Makefile (props changed) pypy/trunk/pypy/module/array/benchmark/intimg.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/trunk/pypy/module/array/benchmark/loop.c (props changed) pypy/trunk/pypy/module/array/benchmark/sum.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.c (props changed) pypy/trunk/pypy/module/array/benchmark/sumtst.py (props changed) pypy/trunk/pypy/module/array/test/test_array_old.py (props changed) Log: Merge the resoperation-refactoring branch. The idea is to reduce the memory needed by ResOperations, by putting certain fields (e.g. a variable-sized list of arguments, or descr, or fail_args) only on the operations that actually need them. The benchmarks don't show any speedup or slowdown, but it's still worth merging because it saves a bit of memory: list of operations are thrown away immediately right now, but we might want to keep them in the future. 
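
The layout change described in this log boils down to replacing the single ResOperation class, where every instance carried an argument list, a descr and fail_args, by a small hierarchy in which fixed-arity operations keep their arguments in dedicated slots and only the operations that need them pay for a descr or failargs; the rest of the revision is the mechanical switch at call sites from op.args / op.descr / op.opnum to the getarg(i) / numargs() / getdescr() / getopnum() / getfailargs() accessors. Here is a condensed sketch of that shape, simplified from the real resoperation.py, so the class and attribute names are only approximate.

    # Condensed sketch of the new operation layout; simplified, not the
    # actual pypy/jit/metainterp/resoperation.py.

    class AbstractResOp(object):
        def __init__(self, result):
            self.result = result
        def getarglist(self):
            raise NotImplementedError
        def numargs(self):
            return len(self.getarglist())
        def getarg(self, i):
            return self.getarglist()[i]
        def getdescr(self):
            return None                 # most operations carry no descr at all

    class UnaryOp(AbstractResOp):
        def initarglist(self, args):
            self._arg0, = args          # one slot instead of a list
        def getarglist(self):
            return [self._arg0]

    class BinaryOp(AbstractResOp):
        def initarglist(self, args):
            self._arg0, self._arg1 = args
        def getarglist(self):
            return [self._arg0, self._arg1]

    class N_aryOp(AbstractResOp):
        def initarglist(self, args):
            self._args = args           # only truly variadic ops keep a list
        def getarglist(self):
            return self._args

    class ResOpWithDescr(AbstractResOp):
        _descr = None
        def setdescr(self, descr):
            self._descr = descr
        def getdescr(self):
            return self._descr

    class GuardResOp(ResOpWithDescr):
        _fail_args = None               # only guards carry failargs
        def setfailargs(self, fail_args):
            self._fail_args = fail_args
        def getfailargs(self):
            return self._fail_args

Concrete operations then combine an arity class with the descr or guard variant as needed, which is why the diffs below touch almost exclusively the call sites and the tests.
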
Modified: pypy/trunk/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/trunk/pypy/jit/backend/cli/method.py (original) +++ pypy/trunk/pypy/jit/backend/cli/method.py Wed Sep 22 14:17:16 2010 @@ -207,9 +207,9 @@ def _collect_types(self, operations, box2classes): for op in operations: - if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC): + if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): box = op.args[0] - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) box2classes.setdefault(box, []).append(descr.selfclass) if op in self.cliloop.guard2ops: @@ -335,7 +335,7 @@ while self.i < N: op = oplist[self.i] self.emit_debug(op.repr()) - func = self.operations[op.opnum] + func = self.operations[op.getopnum()] assert func is not None func(self, op) self.i += 1 @@ -357,10 +357,10 @@ assert op.is_guard() if op in self.cliloop.guard2ops: inputargs, suboperations = self.cliloop.guard2ops[op] - self.match_var_fox_boxes(op.fail_args, inputargs) + self.match_var_fox_boxes(op.getfailargs(), inputargs) self.emit_operations(suboperations) else: - self.emit_return_failed_op(op, op.fail_args) + self.emit_return_failed_op(op, op.getfailargs()) def emit_end(self): assert self.branches == [] @@ -410,7 +410,7 @@ def emit_ovf_op(self, op, emit_op): next_op = self.oplist[self.i+1] - if next_op.opnum == rop.GUARD_NO_OVERFLOW: + if next_op.getopnum() == rop.GUARD_NO_OVERFLOW: self.i += 1 self.emit_ovf_op_and_guard(op, next_op, emit_op) return @@ -544,7 +544,7 @@ self.emit_guard_overflow_impl(op, OpCodes.Brfalse) def emit_op_jump(self, op): - target_token = op.descr + target_token = op.getdescr() assert isinstance(target_token, LoopToken) if target_token.cliloop is self.cliloop: # jump to the beginning of the loop @@ -586,7 +586,7 @@ self.store_result(op) def emit_op_instanceof(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_clitype() op.args[0].load(self) @@ -604,7 +604,7 @@ self.store_result(op) def emit_op_call_impl(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.StaticMethDescr) delegate_type = descr.get_delegate_clitype() meth_invoke = descr.get_meth_info() @@ -619,7 +619,7 @@ emit_op_call_pure = emit_op_call def emit_op_oosend(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.MethDescr) clitype = descr.get_self_clitype() methinfo = descr.get_meth_info() @@ -639,7 +639,7 @@ self.store_result(op) def emit_op_getfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -653,7 +653,7 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_setfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -665,7 +665,7 @@ self.il.Emit(OpCodes.Stfld, fieldinfo) def emit_op_getarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -678,7 +678,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_setarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -689,7 +689,7 @@ 
self.il.Emit(OpCodes.Stelem, itemtype) def emit_op_arraylen_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() op.args[0].load(self) @@ -698,7 +698,7 @@ self.store_result(op) def emit_op_new_array(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) item_clitype = descr.get_clitype() if item_clitype is None: Modified: pypy/trunk/pypy/jit/backend/cli/runner.py ============================================================================== --- pypy/trunk/pypy/jit/backend/cli/runner.py (original) +++ pypy/trunk/pypy/jit/backend/cli/runner.py Wed Sep 22 14:17:16 2010 @@ -105,7 +105,7 @@ def _attach_token_to_faildescrs(self, token, operations): for op in operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) descr._loop_token = token descr._guard_op = op @@ -136,7 +136,7 @@ func = cliloop.funcbox.holder.GetFunc() func(self.get_inputargs()) op = self.failing_ops[self.inputargs.get_failed_op()] - return op.descr + return op.getdescr() def set_future_value_int(self, index, intvalue): self.get_inputargs().set_int(index, intvalue) Modified: pypy/trunk/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/trunk/pypy/jit/backend/llgraph/runner.py Wed Sep 22 14:17:16 2010 @@ -151,16 +151,17 @@ def _compile_operations(self, c, operations, var2index): for op in operations: - llimpl.compile_add(c, op.opnum) - descr = op.descr + llimpl.compile_add(c, op.getopnum()) + descr = op.getdescr() if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) - if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP: + if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython - c._obj.externalobj.operations[-1].descr = descr - for x in op.args: + c._obj.externalobj.operations[-1].setdescr(descr) + for i in range(op.numargs()): + x = op.getarg(i) if isinstance(x, history.Box): llimpl.compile_add_var(c, var2index[x]) elif isinstance(x, history.ConstInt): @@ -173,10 +174,10 @@ raise Exception("'%s' args contain: %r" % (op.getopname(), x)) if op.is_guard(): - faildescr = op.descr + faildescr = op.getdescr() assert isinstance(faildescr, history.AbstractFailDescr) faildescr._fail_args_types = [] - for box in op.fail_args: + for box in op.getfailargs(): if box is None: type = history.HOLE else: @@ -185,7 +186,7 @@ fail_index = self.get_fail_descr_number(faildescr) index = llimpl.compile_add_fail(c, fail_index) faildescr._compiled_fail = c, index - for box in op.fail_args: + for box in op.getfailargs(): if box is not None: llimpl.compile_add_fail_arg(c, var2index[box]) else: @@ -203,13 +204,13 @@ x)) op = operations[-1] assert op.is_final() - if op.opnum == rop.JUMP: - targettoken = op.descr + if op.getopnum() == rop.JUMP: + targettoken = op.getdescr() assert isinstance(targettoken, history.LoopToken) compiled_version = targettoken._llgraph_compiled_version llimpl.compile_add_jump_target(c, compiled_version) - elif op.opnum == rop.FINISH: - faildescr = op.descr + elif op.getopnum() == rop.FINISH: + faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) llimpl.compile_add_fail(c, index) else: @@ -280,7 +281,7 @@ def 
__init__(self, *args, **kwds): BaseCPU.__init__(self, *args, **kwds) self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr') - + def fielddescrof(self, S, fieldname): ofs, size = symbolic.get_field_token(S, fieldname) token = history.getkind(getattr(S, fieldname)) @@ -504,7 +505,7 @@ return ootype.cast_to_object(e) else: return ootype.NULL - + def get_exc_value(self): if llimpl._last_exception: earg = llimpl._last_exception.args[1] @@ -580,7 +581,7 @@ x = descr.callmeth(selfbox, argboxes) # XXX: return None if METH.RESULT is Void return x - + def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) @@ -612,7 +613,7 @@ class KeyManager(object): """ Helper class to convert arbitrary dictionary keys to integers. - """ + """ def __init__(self): self.keys = {} @@ -695,7 +696,7 @@ self.ARRAY = ARRAY = ootype.Array(TYPE) def create(): return boxresult(TYPE, ootype.new(TYPE)) - + def create_array(lengthbox): n = lengthbox.getint() return boxresult(ARRAY, ootype.oonewarray(ARRAY, n)) @@ -757,7 +758,7 @@ obj = objbox.getref(TYPE) value = unwrap(T, valuebox) setattr(obj, fieldname, value) - + self.getfield = getfield self.setfield = setfield self._is_pointer_field = (history.getkind(T) == 'ref') Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Wed Sep 22 14:17:16 2010 @@ -559,12 +559,12 @@ # newops = [] for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: continue # ---------- replace ConstPtrs with GETFIELD_RAW ---------- # xxx some performance issue here - for i in range(len(op.args)): - v = op.args[i] + for i in range(op.numargs()): + v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): addr = self.gcrefs.get_address_of_gcref(v.value) # ^^^even for non-movable objects, to record their presence @@ -574,23 +574,21 @@ newops.append(ResOperation(rop.GETFIELD_RAW, [ConstInt(addr)], box, self.single_gcref_descr)) - op.args[i] = box + op.setarg(i, box) # ---------- write barrier for SETFIELD_GC ---------- - if op.opnum == rop.SETFIELD_GC: - v = op.args[1] + if op.getopnum() == rop.SETFIELD_GC: + v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETFIELD_RAW, op.args, None, - descr=op.descr) + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.opnum == rop.SETARRAYITEM_GC: - v = op.args[2] + if op.getopnum() == rop.SETARRAYITEM_GC: + v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None, - descr=op.descr) + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- newops.append(op) del operations[:] Modified: pypy/trunk/pypy/jit/backend/llsupport/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/regalloc.py Wed Sep 22 14:17:16 2010 @@ -81,6 +81,10 @@ for v in vars: self.possibly_free_var(v) + def possibly_free_vars_for_op(self, op): + for i in 
range(op.numargs()): + self.possibly_free_var(op.getarg(i)) + def _check_invariants(self): if not we_are_translated(): # make sure no duplicates Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Wed Sep 22 14:17:16 2010 @@ -258,18 +258,18 @@ gc_ll_descr._gen_write_barrier(newops, v_base, v_value) assert llop1.record == [] assert len(newops) == 1 - assert newops[0].opnum == rop.COND_CALL_GC_WB - assert newops[0].args[0] == v_base - assert newops[0].args[1] == v_value + assert newops[0].getopnum() == rop.COND_CALL_GC_WB + assert newops[0].getarg(0) == v_base + assert newops[0].getarg(1) == v_value assert newops[0].result is None - wbdescr = newops[0].descr + wbdescr = newops[0].getdescr() assert isinstance(wbdescr.jit_wb_if_flag, int) assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) def test_get_rid_of_debug_merge_point(self): operations = [ - ResOperation(rop.DEBUG_MERGE_POINT, [], None), + ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None), ] gc_ll_descr = self.gc_ll_descr gc_ll_descr.rewrite_assembler(None, operations) @@ -298,13 +298,14 @@ gc_ll_descr.gcrefs = MyFakeGCRefList() gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations) assert len(operations) == 2 - assert operations[0].opnum == rop.GETFIELD_RAW - assert operations[0].args == [ConstInt(43)] - assert operations[0].descr == gc_ll_descr.single_gcref_descr + assert operations[0].getopnum() == rop.GETFIELD_RAW + assert operations[0].getarg(0) == ConstInt(43) + assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) - assert operations[1].opnum == rop.PTR_EQ - assert operations[1].args == [v_random_box, v_box] + assert operations[1].getopnum() == rop.PTR_EQ + assert operations[1].getarg(0) == v_random_box + assert operations[1].getarg(1) == v_box assert operations[1].result == v_result def test_rewrite_assembler_1_cannot_move(self): @@ -336,8 +337,9 @@ finally: rgc.can_move = old_can_move assert len(operations) == 1 - assert operations[0].opnum == rop.PTR_EQ - assert operations[0].args == [v_random_box, ConstPtr(s_gcref)] + assert operations[0].getopnum() == rop.PTR_EQ + assert operations[0].getarg(0) == v_random_box + assert operations[0].getarg(1) == ConstPtr(s_gcref) assert operations[0].result == v_result # check that s_gcref gets added to the list anyway, to make sure # that the GC sees it @@ -356,14 +358,15 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETFIELD_RAW - assert operations[1].args == [v_base, v_value] - assert operations[1].descr == field_descr + assert operations[1].getopnum() == rop.SETFIELD_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_value + assert operations[1].getdescr() == field_descr def test_rewrite_assembler_3(self): # check write barriers before SETARRAYITEM_GC @@ -379,11 +382,13 @@ 
gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETARRAYITEM_RAW - assert operations[1].args == [v_base, v_index, v_value] - assert operations[1].descr == array_descr + assert operations[1].getopnum() == rop.SETARRAYITEM_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_index + assert operations[1].getarg(2) == v_value + assert operations[1].getdescr() == array_descr Modified: pypy/trunk/pypy/jit/backend/llvm/compile.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llvm/compile.py (original) +++ pypy/trunk/pypy/jit/backend/llvm/compile.py Wed Sep 22 14:17:16 2010 @@ -107,7 +107,7 @@ # store away the exception into self.backup_exc_xxx, *unless* the # branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION. if exc: - opnum = operations[0].opnum + opnum = operations[0].getopnum() if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): self._store_away_exception() # Normal handling of the operations follows. @@ -115,7 +115,7 @@ self._generate_op(op) def _generate_op(self, op): - opnum = op.opnum + opnum = op.getopnum() for i, name in all_operations: if opnum == i: meth = getattr(self, name) @@ -475,7 +475,7 @@ return location def generate_GETFIELD_GC(self, op): - loc = self._generate_field_gep(op.args[0], op.descr) + loc = self._generate_field_gep(op.args[0], op.getdescr()) self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") generate_GETFIELD_GC_PURE = generate_GETFIELD_GC @@ -483,7 +483,7 @@ generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC def generate_SETFIELD_GC(self, op): - fielddescr = op.descr + fielddescr = op.getdescr() loc = self._generate_field_gep(op.args[0], fielddescr) assert isinstance(fielddescr, FieldDescr) getarg = self.cpu.getarg_by_index[fielddescr.size_index] @@ -491,7 +491,7 @@ llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "") def generate_CALL(self, op): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr) v = op.args[0] @@ -579,7 +579,7 @@ self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") def generate_ARRAYLEN_GC(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_len(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_length) @@ -598,7 +598,7 @@ return location def _generate_array_gep(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) location = self._generate_gep(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_array) @@ -612,7 +612,7 @@ def generate_SETARRAYITEM_GC(self, op): loc = self._generate_array_gep(op) - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index] value_ref = getarg(self, op.args[2]) @@ -660,7 +660,7 @@ return res def generate_NEW(self, op): - sizedescr = op.descr + sizedescr = op.getdescr() assert isinstance(sizedescr, SizeDescr) res = 
self._generate_new(self.cpu._make_const_int(sizedescr.size)) self.vars[op.result] = res @@ -695,7 +695,7 @@ self.vars[op.result] = res def generate_NEW_ARRAY(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_new_array(op, arraydescr.ty_array_ptr, self.cpu._make_const_int(arraydescr.itemsize), Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/runner_test.py (original) +++ pypy/trunk/pypy/jit/backend/test/runner_test.py Wed Sep 22 14:17:16 2010 @@ -1,5 +1,6 @@ import py, sys, random, os, struct, operator from pypy.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, LoopToken, @@ -39,7 +40,7 @@ else: raise NotImplementedError(box) res = self.cpu.execute_token(looptoken) - if res is operations[-1].descr: + if res is operations[-1].getdescr(): self.guard_failed = False else: self.guard_failed = True @@ -74,10 +75,11 @@ ResOperation(rop.FINISH, results, None, descr=BasicFailDescr(0))] if operations[0].is_guard(): - operations[0].fail_args = [] + operations[0].setfailargs([]) if not descr: descr = BasicFailDescr(1) - operations[0].descr = descr + if descr is not None: + operations[0].setdescr(descr) inputargs = [] for box in valueboxes: if isinstance(box, Box) and box not in inputargs: @@ -116,7 +118,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -137,7 +139,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, None, i1, None] + operations[2].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -160,7 +162,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -184,7 +186,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -194,7 +196,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -218,7 +220,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -228,7 +230,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -251,7 +253,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -317,7 +319,7 @@ 
descr=BasicFailDescr()), ResOperation(rop.JUMP, [z, t], None, descr=looptoken), ] - operations[-2].fail_args = [t, z] + operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 10) @@ -363,7 +365,7 @@ ResOperation(rop.FINISH, [v_res], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [] + ops[1].setfailargs([]) else: v_exc = self.cpu.ts.BoxRef() ops = [ @@ -372,7 +374,7 @@ descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [v_res] + ops[1].setfailargs([v_res]) # looptoken = LoopToken() self.cpu.compile_loop([v1, v2], ops, looptoken) @@ -909,8 +911,8 @@ ResOperation(rop.GUARD_TRUE, [i2], None), ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), ] - operations[2].fail_args = inputargs[:] - operations[2].descr = faildescr + operations[2].setfailargs(inputargs[:]) + operations[2].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -975,7 +977,7 @@ ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] - operations[-2].fail_args = fboxes + operations[-2].setfailargs(fboxes) looptoken = LoopToken() self.cpu.compile_loop(fboxes, operations, looptoken) @@ -1098,7 +1100,7 @@ descr=BasicFailDescr(4)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] - operations[1].fail_args = [] + operations[1].setfailargs([]) looptoken = LoopToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) @@ -1412,7 +1414,7 @@ FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr: + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 @@ -1462,7 +1464,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i0] + ops[2].setfailargs([i1, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1506,7 +1508,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i2, i0] + ops[2].setfailargs([i1, i2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1551,7 +1553,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, f2, i0] + ops[2].setfailargs([i1, f2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1824,7 +1826,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) Modified: pypy/trunk/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/test_ll_random.py (original) +++ 
pypy/trunk/pypy/jit/backend/test/test_ll_random.py Wed Sep 22 14:17:16 2010 @@ -464,7 +464,7 @@ self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 5. Non raising-call and GUARD_EXCEPTION @@ -486,7 +486,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) op._exc_box = None builder.should_fail_by = op builder.guard_op = op @@ -507,7 +507,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 4. raising call and guard_no_exception @@ -524,7 +524,7 @@ op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) @@ -548,7 +548,7 @@ op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) Modified: pypy/trunk/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/test_random.py (original) +++ pypy/trunk/pypy/jit/backend/test/test_random.py Wed Sep 22 14:17:16 2010 @@ -86,7 +86,7 @@ def process_operation(self, s, op, names, subops): args = [] - for v in op.args: + for v in op.getarglist(): if v in names: args.append(names[v]) ## elif isinstance(v, ConstAddr): @@ -105,11 +105,11 @@ args.append('ConstInt(%d)' % v.value) else: raise NotImplementedError(v) - if op.descr is None: + if op.getdescr() is None: descrstr = '' else: try: - descrstr = ', ' + op.descr._random_info + descrstr = ', ' + op.getdescr()._random_info except AttributeError: descrstr = ', descr=...' 
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % ( @@ -129,7 +129,7 @@ def print_loop_prebuilt(ops): for op in ops: - for arg in op.args: + for arg in op.getarglist(): if isinstance(arg, ConstPtr): if arg not in names: writevar(arg, 'const_ptr') @@ -191,7 +191,7 @@ if self.should_fail_by is None: fail_args = self.loop.operations[-1].args else: - fail_args = self.should_fail_by.fail_args + fail_args = self.should_fail_by.getfailargs() for i, v in enumerate(fail_args): if isinstance(v, (BoxFloat, ConstFloat)): print >>s, (' assert cpu.get_latest_value_float(%d) == %r' @@ -284,8 +284,8 @@ builder.intvars[:] = original_intvars else: op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - op.descr = BasicFailDescr() - op.fail_args = fail_subset + op.setdescr(BasicFailDescr()) + op.setfailargs(fail_subset) builder.loop.operations.append(op) class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation): @@ -345,8 +345,8 @@ def produce_into(self, builder, r): op, passing = self.gen_guard(builder, r) builder.loop.operations.append(op) - op.descr = BasicFailDescr() - op.fail_args = builder.subset_of_intvars(r) + op.setdescr(BasicFailDescr()) + op.setfailargs(builder.subset_of_intvars(r)) if not passing: builder.should_fail_by = op builder.guard_op = op @@ -553,7 +553,7 @@ endvars = [] used_later = {} for op in loop.operations: - for v in op.args: + for v in op.getarglist(): used_later[v] = True for v in startvars: if v not in used_later: @@ -577,11 +577,11 @@ def get_fail_args(self): if self.should_fail_by.is_guard(): - assert self.should_fail_by.fail_args is not None - return self.should_fail_by.fail_args + assert self.should_fail_by.getfailargs() is not None + return self.should_fail_by.getfailargs() else: - assert self.should_fail_by.opnum == rop.FINISH - return self.should_fail_by.args + assert self.should_fail_by.getopnum() == rop.FINISH + return self.should_fail_by.getarglist() def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: @@ -606,7 +606,7 @@ else: raise NotImplementedError(box) fail = cpu.execute_token(self.loop.token) - assert fail is self.should_fail_by.descr + assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): value = cpu.get_latest_value_float(i) @@ -620,7 +620,7 @@ exc = cpu.grab_exc_value() if (self.guard_op is not None and self.guard_op.is_guard_exception()): - if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION: + if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: assert exc else: assert not exc @@ -633,26 +633,26 @@ else: op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box], BoxPtr()) - op.descr = BasicFailDescr() - op.fail_args = [] + op.setdescr(BasicFailDescr()) + op.setfailargs([]) return op if self.dont_generate_more: return False r = self.r guard_op = self.guard_op - fail_args = guard_op.fail_args - fail_descr = guard_op.descr + fail_args = guard_op.getfailargs() + fail_descr = guard_op.getdescr() op = self.should_fail_by - if not op.fail_args: + if not op.getfailargs(): return False # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) if guard_op.is_guard_exception(): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, - op.fail_args[:]) - self.generate_ops(bridge_builder, r, subloop, op.fail_args[:]) + op.getfailargs()[:]) + self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:]) # note that 'self.guard_op' now points to the guard that will fail in # this 
new bridge, while 'guard_op' still points to the guard that # has just failed. Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Wed Sep 22 14:17:16 2010 @@ -390,8 +390,8 @@ def _find_debug_merge_point(self, operations): for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: - funcname = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + funcname = op.getarg(0)._get_str() break else: funcname = "" % len(self.loop_run_counters) @@ -684,25 +684,25 @@ self.mc.POP(loc) def regalloc_perform(self, op, arglocs, resloc): - genop_list[op.opnum](self, op, arglocs, resloc) + genop_list[op.getopnum()](self, op, arglocs, resloc) def regalloc_perform_discard(self, op, arglocs): - genop_discard_list[op.opnum](self, op, arglocs) + genop_discard_list[op.getopnum()](self, op, arglocs) def regalloc_perform_with_guard(self, op, guard_op, faillocs, arglocs, resloc, current_depths): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths - failargs = guard_op.fail_args - guard_opnum = guard_op.opnum + failargs = guard_op.getfailargs() + guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, faildescr, failargs, faillocs) if op is None: dispatch_opnum = guard_opnum else: - dispatch_opnum = op.opnum + dispatch_opnum = op.getopnum() res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token, arglocs, resloc) faildescr._x86_adr_jump_offset = res @@ -728,7 +728,7 @@ def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): rl = result_loc.lowest8bits() - if isinstance(op.args[0], Const): + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value) else: @@ -758,8 +758,8 @@ def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum - if isinstance(op.args[0], Const): + guard_opnum = guard_op.getopnum() + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: return self.implement_guard(guard_token, rev_cond) @@ -776,7 +776,7 @@ def _cmpop_guard_float(cond, false_cond, need_jp): def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -945,7 +945,7 @@ genop_guard_float_ge = _cmpop_guard_float("AE", "B", False) def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -973,7 +973,7 @@ self.mc.CVTSI2SD(resloc, arglocs[0]) def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'Z') @@ -987,7 +987,7 @@ self.mc.MOVZX8(resloc, rl) def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, 
resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'NZ') @@ -1123,7 +1123,7 @@ assert isinstance(baseofs, ImmedLoc) assert isinstance(scale_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value) - if op.args[2].type == FLOAT: + if op.getarg(2).type == FLOAT: self.mc.MOVSD(dest_addr, value_loc) else: if IS_X86_64 and scale_loc.value == 3: @@ -1219,7 +1219,7 @@ return addr def _gen_guard_overflow(self, guard_op, guard_token): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() if guard_opnum == rop.GUARD_NO_OVERFLOW: return self.implement_guard(guard_token, 'O') elif guard_opnum == rop.GUARD_OVERFLOW: @@ -1247,8 +1247,8 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - if guard_op.args[0].type == FLOAT: - assert guard_op.args[1].type == FLOAT + if guard_op.getarg(0).type == FLOAT: + assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: self.mc.CMP(locs[0], locs[1]) @@ -1639,8 +1639,8 @@ assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value - if isinstance(op.args[0], Const): - x = imm(op.args[0].getint()) + if isinstance(op.getarg(0), Const): + x = imm(op.getarg(0).getint()) else: x = arglocs[1] if x is eax: @@ -1659,7 +1659,7 @@ def genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) self.genop_call(op, arglocs, result_loc) @@ -1668,10 +1668,10 @@ def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # @@ -1756,7 +1756,7 @@ def genop_discard_cond_call_gc_wb(self, op, arglocs): # use 'mc._mc' directly instead of 'mc', to avoid # bad surprizes if the code buffer is mostly full - descr = op.descr + descr = op.getdescr() if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Wed Sep 22 14:17:16 2010 @@ -234,6 +234,12 @@ else: self.rm.possibly_free_var(var) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + var = op.getarg(i) + if var is not None: # xxx kludgy + self.possibly_free_var(var) + def possibly_free_vars(self, vars): for var in vars: if var is not None: # xxx kludgy @@ -262,12 +268,12 @@ selected_reg, need_lower_byte) def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.opnum != rop.JUMP or jump.descr is not looptoken: + if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: loop_consts = {} else: loop_consts = {} for i in range(len(inputargs)): - if inputargs[i] is jump.args[i]: + if inputargs[i] is jump.getarg(i): loop_consts[inputargs[i]] = i return loop_consts @@ -312,7 +318,7 @@ self.assembler.regalloc_perform(op, 
arglocs, result_loc) def locs_for_fail(self, guard_op): - return [self.loc(v) for v in guard_op.fail_args] + return [self.loc(v) for v in guard_op.getfailargs()] def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -324,7 +330,7 @@ current_depths) if op.result is not None: self.possibly_free_var(op.result) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def perform_guard(self, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -338,7 +344,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -346,24 +352,24 @@ self.assembler.regalloc_perform_discard(op, arglocs) def can_merge_with_next_guard(self, op, i, operations): - if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER: - assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED + if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: + assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if not op.is_comparison(): if op.is_ovf(): - if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and - operations[i + 1].opnum != rop.GUARD_OVERFLOW): + if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and + operations[i + 1].getopnum() != rop.GUARD_OVERFLOW): print "int_xxx_ovf not followed by guard_(no)_overflow" raise AssertionError return True return False - if (operations[i + 1].opnum != rop.GUARD_TRUE and - operations[i + 1].opnum != rop.GUARD_FALSE): + if (operations[i + 1].getopnum() != rop.GUARD_TRUE and + operations[i + 1].getopnum() != rop.GUARD_FALSE): return False - if operations[i + 1].args[0] is not op.result: + if operations[i + 1].getarg(0) is not op.result: return False if (self.longevity[op.result][1] > i + 1 or - op.result in operations[i + 1].fail_args): + op.result in operations[i + 1].getfailargs()): return False return True @@ -376,13 +382,13 @@ self.xrm.position = i if op.has_no_side_effect() and op.result not in self.longevity: i += 1 - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) continue if self.can_merge_with_next_guard(op, i, operations): - oplist_with_guard[op.opnum](self, op, operations[i + 1]) + oplist_with_guard[op.getopnum()](self, op, operations[i + 1]) i += 1 else: - oplist[op.opnum](self, op) + oplist[op.getopnum()](self, op) if op.result is not None: self.possibly_free_var(op.result) self.rm._check_invariants() @@ -402,19 +408,20 @@ op = operations[i] if op.result is not None: start_live[op.result] = i - for arg in op.args: + for j in range(op.numargs()): + arg = op.getarg(j) if isinstance(arg, Box): if arg not in start_live: - print "Bogus arg in operation %d at %d" % (op.opnum, i) + print "Bogus arg in operation %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) if op.is_guard(): - for arg in op.fail_args: + for arg in op.getfailargs(): if arg is None: # hole continue assert isinstance(arg, Box) if arg not in start_live: - print "Bogus arg in guard %d at %d" % (op.opnum, i) + print "Bogus arg in guard %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) for arg in inputargs: @@ -432,9 +439,9 @@ return self.rm.loc(v) def _consider_guard(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = 
self.rm.make_sure_var_in_reg(op.getarg(0)) self.perform_guard(op, [loc], None) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) consider_guard_true = _consider_guard consider_guard_false = _consider_guard @@ -442,52 +449,54 @@ consider_guard_isnull = _consider_guard def consider_finish(self, op): - locs = [self.loc(v) for v in op.args] - locs_are_ref = [v.type == REF for v in op.args] - fail_index = self.assembler.cpu.get_fail_descr_number(op.descr) + locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] + locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())] + fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr()) self.assembler.generate_failure(fail_index, locs, self.exc, locs_are_ref) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) def consider_guard_exception(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() - loc1 = self.rm.force_allocate_reg(box, op.args) + args = op.getarglist() + loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used - resloc = self.rm.force_allocate_reg(op.result, op.args + [box]) + resloc = self.rm.force_allocate_reg(op.result, args + [box]) else: resloc = None self.perform_guard(op, [loc, loc1], resloc) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(box) consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception def consider_guard_value(self, op): - x = self.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + x = self.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_class(self, op): - assert isinstance(op.args[0], Box) - x = self.rm.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + assert isinstance(op.getarg(0), Box) + x = self.rm.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_guard_nonnull_class = consider_guard_class def _consider_binop_part(self, op): - x = op.args[0] - argloc = self.loc(op.args[1]) - loc = self.rm.force_result_in_reg(op.result, x, op.args) - self.rm.possibly_free_var(op.args[1]) + x = op.getarg(0) + argloc = self.loc(op.getarg(1)) + args = op.getarglist() + loc = self.rm.force_result_in_reg(op.result, x, args) + self.rm.possibly_free_var(op.getarg(1)) return loc, argloc def _consider_binop(self, op): @@ -510,26 +519,27 @@ consider_int_add_ovf = _consider_binop_with_guard def consider_int_neg(self, op): - res = self.rm.force_result_in_reg(op.result, op.args[0]) + res = self.rm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [res], res) consider_int_invert = consider_int_neg def consider_int_lshift(self, op): - if isinstance(op.args[1], Const): - loc2 = self.rm.convert_to_imm(op.args[1]) + if isinstance(op.getarg(1), Const): + loc2 = self.rm.convert_to_imm(op.getarg(1)) else: - loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) - loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args) + loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) + args = op.getarglist() + loc1 = 
self.rm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc1, loc2], loc1) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_int_rshift = consider_int_lshift consider_uint_rshift = consider_int_lshift def _consider_int_div_or_mod(self, op, resultreg, trashreg): - l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax) - l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) + l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax) + l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg) # the register (eax or edx) not holding what we are looking for # will be just trash after that operation @@ -538,7 +548,7 @@ assert l0 is eax assert l1 is ecx assert l2 is resultreg - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(tmpvar) def consider_int_mod(self, op): @@ -552,17 +562,18 @@ consider_uint_floordiv = consider_int_floordiv def _consider_compop(self, op, guard_op): - vx = op.args[0] - vy = op.args[1] + vx = op.getarg(0) + vy = op.getarg(1) arglocs = [self.loc(vx), self.loc(vy)] if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or isinstance(vx, Const) or isinstance(vy, Const)): pass else: arglocs[0] = self.rm.make_sure_var_in_reg(vx) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + self.rm.possibly_free_vars(args) if guard_op is None: - loc = self.rm.force_allocate_reg(op.result, op.args, + loc = self.rm.force_allocate_reg(op.result, args, need_lower_byte=True) self.Perform(op, arglocs, loc) else: @@ -582,10 +593,11 @@ consider_ptr_ne = _consider_compop def _consider_float_op(self, op): - loc1 = self.xrm.loc(op.args[1]) - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args) + loc1 = self.xrm.loc(op.getarg(1)) + args = op.getarglist() + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc0, loc1], loc0) - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) consider_float_add = _consider_float_op consider_float_sub = _consider_float_op @@ -593,11 +605,12 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args, + args = op.getarglist() + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) - loc1 = self.xrm.loc(op.args[1]) + loc1 = self.xrm.loc(op.getarg(1)) arglocs = [loc0, loc1] - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) if guard_op is None: res = self.rm.force_allocate_reg(op.result, need_lower_byte=True) self.Perform(op, arglocs, res) @@ -612,26 +625,26 @@ consider_float_ge = _consider_float_cmp def consider_float_neg(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_float_abs(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_float_to_int(self, op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False) + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False) loc1 = 
self.rm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_int_to_float(self, op): - loc0 = self.rm.loc(op.args[0]) + loc0 = self.rm.loc(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None @@ -650,11 +663,11 @@ self.Perform(op, arglocs, resloc) def _consider_call(self, op, guard_not_forced_op=None): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, BaseCallDescr) - assert len(calldescr.arg_classes) == len(op.args) - 1 + assert len(calldescr.arg_classes) == op.numargs() - 1 size = calldescr.get_result_size(self.translate_support_code) - self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args], + self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_not_forced_op) def consider_call(self, op): @@ -665,28 +678,29 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: - self.rm._sync_var(op.args[vable_index]) - vable = self.fm.loc(op.args[vable_index]) + self.rm._sync_var(op.getarg(vable_index)) + vable = self.fm.loc(op.getarg(vable_index)) else: vable = imm(0) self._call(op, [imm(size), vable] + - [self.loc(arg) for arg in op.args], + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args) + args = op.getarglist() + loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args) # ^^^ we force loc_newvalue in a reg (unless it's a Const), # because it will be needed anyway by the following setfield_gc. # It avoids loading it twice from the memory. 
- loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, + loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) arglocs = [loc_base, loc_newvalue] # add eax, ecx and edx as extra "arguments" to ensure they are @@ -700,7 +714,7 @@ and self.rm.stays_alive(v)): arglocs.append(reg) self.PerformDiscard(op, arglocs) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) @@ -725,15 +739,15 @@ def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.descr): - self._fastpath_malloc(op, op.descr) + if gc_ll_descr.can_inline_malloc(op.getdescr()): + self._fastpath_malloc(op, op.getdescr()) else: - args = gc_ll_descr.args_for_new(op.descr) + args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] return self._call(op, arglocs) def consider_new_with_vtable(self, op): - classint = op.args[0].getint() + classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): self._fastpath_malloc(op, descrsize) @@ -742,34 +756,34 @@ else: args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) def consider_newstr(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newstr is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code) assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, 0, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0), op.result) def consider_newunicode(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newunicode is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) if itemsize == 4: - return self._malloc_varsize(ofs_items, ofs, 2, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0), op.result) elif itemsize == 2: - return self._malloc_varsize(ofs_items, ofs, 1, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0), op.result) else: assert False, itemsize @@ -784,7 +798,7 @@ else: tempbox = None other_loc = imm(ofs_items + (v.getint() << scale)) - self._call(ResOperation(rop.NEW, [v], res_v), + self._call(ResOperation(rop.NEW, [], res_v), [other_loc], [v]) loc = self.rm.make_sure_var_in_reg(v, [res_v]) assert self.loc(res_v) == eax @@ -792,22 +806,22 @@ self.rm.possibly_free_var(v) if tempbox is not None: self.rm.possibly_free_var(tempbox) - self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None), + self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None), [eax, imm(ofs_length), imm(WORD), loc]) def consider_new_array(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr) + args = 
self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) # boehm GC (XXX kill the following code at some point) scale_of_field, basesize, ofs_length, _ = ( - self._unpack_arraydescr(op.descr)) + self._unpack_arraydescr(op.getdescr())) return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.args[0], op.result) + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -829,50 +843,54 @@ return imm(ofs), imm(size), ptr def consider_setfield_gc(self, op): - ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr) + ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True else: need_lower_byte = False - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - value_loc = self.make_sure_var_in_reg(op.args[1], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + value_loc = self.make_sure_var_in_reg(op.getarg(1), args, need_lower_byte=need_lower_byte) - self.possibly_free_vars(op.args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc]) consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=True) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc]) consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: need_lower_byte = True else: need_lower_byte = False - value_loc = self.make_sure_var_in_reg(op.args[2], op.args, + value_loc = self.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=need_lower_byte) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.possibly_free_vars(op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc, imm(scale), imm(ofs)]) consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars(args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc) @@ -881,10 +899,11 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - 
base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc) @@ -893,8 +912,8 @@ def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register - argloc = self.loc(op.args[0]) - self.rm.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) if guard_op is not None: self.perform_with_guard(op, guard_op, [argloc], None) else: @@ -904,33 +923,36 @@ consider_int_is_zero = consider_int_is_true def consider_same_as(self, op): - argloc = self.loc(op.args[0]) - self.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.possibly_free_var(op.getarg(0)) resloc = self.force_allocate_reg(op.result) self.Perform(op, [argloc], resloc) #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc], result_loc) consider_unicodelen = consider_strlen def consider_arraylen_gc(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc], result_loc) @@ -939,7 +961,7 @@ def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) self.jump_target_descr = descr nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) @@ -951,17 +973,20 @@ xmmtmp = X86XMMRegisterManager.all_regs[0] xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT] + # XXX we don't need a copy, we only just the original list + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] assert tmploc not in nonfloatlocs dst_locations = [loc for loc in nonfloatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, tmploc) # Part about floats - src_locations = [self.loc(arg) for arg 
in op.args if arg.type == FLOAT] + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type == FLOAT] dst_locations = [loc for loc in floatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp) self.rm.possibly_free_var(box) self.xrm.possibly_free_var(box1) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) def consider_debug_merge_point(self, op): @@ -1002,12 +1027,21 @@ def add_none_argument(fn): return lambda self, op: fn(self, op, None) +def is_comparison_or_ovf_op(opnum): + from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp + cls = opclasses[opnum] + # hack hack: in theory they are instance method, but they don't use + # any instance field, we can use a fake object + class Fake(cls): + pass + op = Fake(None) + return op.is_comparison() or op.is_ovf() + for name, value in RegAlloc.__dict__.iteritems(): if name.startswith('consider_'): name = name[len('consider_'):] num = getattr(rop, name.upper()) - if (ResOperation(num, [], None).is_comparison() - or ResOperation(num, [], None).is_ovf() + if (is_comparison_or_ovf_op(num) or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) Modified: pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py Wed Sep 22 14:17:16 2010 @@ -47,7 +47,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].descr + descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? @@ -114,8 +114,8 @@ assert loop.token._x86_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? 
if IS_X86_32: - assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.descr._x86_bridge_param_depth == 0 + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py Wed Sep 22 14:17:16 2010 @@ -9,7 +9,7 @@ from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\ - FloatConstants + FloatConstants, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -17,6 +17,11 @@ from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.backend.x86.rx86 import * +def test_is_comparison_or_ovf_op(): + assert not is_comparison_or_ovf_op(rop.INT_ADD) + assert is_comparison_or_ovf_op(rop.INT_ADD_OVF) + assert is_comparison_or_ovf_op(rop.INT_EQ) + CPU = getcpuclass() class MockGcDescr(GcCache): def get_funcptr_for_new(self): @@ -159,8 +164,8 @@ assert guard_op.is_guard() bridge = self.parse(ops, **kwds) assert ([box.type for box in bridge.inputargs] == - [box.type for box in guard_op.fail_args]) - faildescr = guard_op.descr + [box.type for box in guard_op.getfailargs()]) + faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations) return bridge @@ -607,7 +612,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) @@ -630,7 +635,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) Modified: pypy/trunk/pypy/jit/backend/x86/test/test_runner.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_runner.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_runner.py Wed Sep 22 14:17:16 2010 @@ -265,7 +265,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) looptoken = LoopToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: @@ -314,7 +314,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = LoopToken() self.cpu.compile_loop(inputargs, ops, looptoken) @@ -353,7 +353,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[3].fail_args = [i1] + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = 
agent.functions[0] assert name == "Loop # 0: hello" @@ -368,7 +368,7 @@ ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) name, address, size = agent.functions[1] @@ -462,7 +462,7 @@ cmp_result = BoxInt() ops.append(ResOperation(float_op, args, cmp_result)) ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr())) - ops[-1].fail_args = [failed] + ops[-1].setfailargs([failed]) ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr())) Modified: pypy/trunk/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/compile.py (original) +++ pypy/trunk/pypy/jit/metainterp/compile.py Wed Sep 22 14:17:16 2010 @@ -51,7 +51,7 @@ def compile_new_loop(metainterp, old_loop_tokens, greenkey, start): """Try to compile a new loop by closing the current history back to the first operation. - """ + """ history = metainterp.history loop = create_empty_loop(metainterp) loop.greenkey = greenkey @@ -65,7 +65,7 @@ jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token - loop.operations[-1].descr = loop_token # patch the target of the JUMP + loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) @@ -133,7 +133,7 @@ metainterp_sd.profiler.end_backend() if not we_are_translated(): metainterp_sd.stats.compiled() - metainterp_sd.log("compiled new bridge") + metainterp_sd.log("compiled new bridge") # ____________________________________________________________ @@ -177,7 +177,7 @@ class TerminatingLoopToken(LoopToken): terminating = True - + def __init__(self, nargs, finishdescr): self.specnodes = [prebuiltNotSpecNode]*nargs self.finishdescr = finishdescr @@ -233,14 +233,14 @@ self.metainterp_sd = metainterp_sd def store_final_boxes(self, guard_op, boxes): - guard_op.fail_args = boxes - self.guard_opnum = guard_op.opnum + guard_op.setfailargs(boxes) + self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): - assert guard_value_op.opnum == rop.GUARD_VALUE - box = guard_value_op.args[0] + assert guard_value_op.getopnum() == rop.GUARD_VALUE + box = guard_value_op.getarg(0) try: - i = guard_value_op.fail_args.index(box) + i = guard_value_op.getfailargs().index(box) except ValueError: return # xxx probably very rare else: @@ -508,7 +508,7 @@ def compile_new_bridge(metainterp, old_loop_tokens, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. - """ + """ # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. # @@ -540,13 +540,14 @@ op = new_loop.operations[-1] if not isinstance(target_loop_token, TerminatingLoopToken): # normal case - op.descr = target_loop_token # patch the jump target + op.setdescr(target_loop_token) # patch the jump target else: # The target_loop_token is a pseudo loop token, # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. 
a FINISH descr = target_loop_token.finishdescr - new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) + args = op.getarglist() + new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op # ____________________________________________________________ @@ -597,6 +598,6 @@ ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) ] - operations[1].fail_args = [] + operations[1].setfailargs([]) cpu.compile_loop(inputargs, operations, loop_token) return loop_token Modified: pypy/trunk/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/graphpage.py (original) +++ pypy/trunk/pypy/jit/metainterp/graphpage.py Wed Sep 22 14:17:16 2010 @@ -17,13 +17,13 @@ for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): - graphs.append((SubGraph(op.descr._debug_suboperations), + graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) graphpage = ResOpGraphPage(graphs, errmsg) graphpage.display() def is_interesting_guard(op): - return hasattr(op.descr, '_debug_suboperations') + return hasattr(op.getdescr(), '_debug_suboperations') class ResOpGraphPage(GraphPage): @@ -76,7 +76,7 @@ for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: if not last_was_mergepoint: last_was_mergepoint = True self.mark_starter(graphindex, i) @@ -155,7 +155,7 @@ op = operations[opindex] lines.append(repr(op)) if is_interesting_guard(op): - tgt = op.descr._debug_suboperations[0] + tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] self.genedge((graphindex, opstartindex), (tgt_g, tgt_i), @@ -167,8 +167,8 @@ self.genedge((graphindex, opstartindex), (graphindex, opindex)) break - if op.opnum == rop.JUMP: - tgt = op.descr + if op.getopnum() == rop.JUMP: + tgt = op.getdescr() tgt_g = -1 if tgt is None: tgt_g = graphindex @@ -191,7 +191,9 @@ def getlinks(self): boxes = {} for op in self.all_operations: - for box in op.args + [op.result]: + args = op.getarglist() + args.append(op.result) + for box in args: if getattr(box, 'is_box', False): boxes[box] = True links = {} Modified: pypy/trunk/pypy/jit/metainterp/history.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/history.py (original) +++ pypy/trunk/pypy/jit/metainterp/history.py Wed Sep 22 14:17:16 2010 @@ -532,7 +532,7 @@ class BoxFloat(Box): type = FLOAT _attrs_ = ('value',) - + def __init__(self, floatval=0.0): assert isinstance(floatval, float) self.value = floatval @@ -759,33 +759,34 @@ assert len(seen) == len(inputargs), ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) - + @staticmethod def check_consistency_of_branch(operations, seen): "NOT_RPYTHON" for op in operations: - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) if isinstance(box, Box): assert box in seen if op.is_guard(): - assert op.descr is not None - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + assert op.getdescr() is not None + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations TreeLoop.check_consistency_of_branch(ops, seen.copy()) - for box in op.fail_args or 
[]: + for box in op.getfailargs() or []: if box is not None: assert isinstance(box, Box) assert box in seen else: - assert op.fail_args is None + assert op.getfailargs() is None box = op.result if box is not None: assert isinstance(box, Box) assert box not in seen seen[box] = True assert operations[-1].is_final() - if operations[-1].opnum == rop.JUMP: - target = operations[-1].descr + if operations[-1].getopnum() == rop.JUMP: + target = operations[-1].getdescr() if target is not None: assert isinstance(target, LoopToken) @@ -793,7 +794,8 @@ # RPython-friendly print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: - print '\t', op.getopname(), self._dump_args(op.args), \ + args = op.getarglist() + print '\t', op.getopname(), self._dump_args(args), \ self._dump_box(op.result) def _dump_args(self, boxes): @@ -809,14 +811,14 @@ return '<%s>' % (self.name,) def _list_all_operations(result, operations, omit_finish=True): - if omit_finish and operations[-1].opnum == rop.FINISH: + if omit_finish and operations[-1].getopnum() == rop.FINISH: # xxx obscure return result.extend(operations) for op in operations: - if op.is_guard() and op.descr: - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + if op.is_guard() and op.getdescr(): + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations _list_all_operations(result, ops, omit_finish) # ____________________________________________________________ @@ -885,7 +887,7 @@ self.aborted_count += 1 def entered(self): - self.enter_count += 1 + self.enter_count += 1 def compiled(self): self.compiled_count += 1 @@ -898,7 +900,7 @@ def add_new_loop(self, loop): self.loops.append(loop) - + # test read interface def get_all_loops(self): Modified: pypy/trunk/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/logger.py (original) +++ pypy/trunk/pypy/jit/metainterp/logger.py Wed Sep 22 14:17:16 2010 @@ -79,27 +79,27 @@ debug_print('[' + args + ']') for i in range(len(operations)): op = operations[i] - if op.opnum == rop.DEBUG_MERGE_POINT: - loc = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + loc = op.getarg(0)._get_str() debug_print("debug_merge_point('%s')" % (loc,)) continue - args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args]) + args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())]) if op.result is not None: res = self.repr_of_arg(memo, op.result) + " = " else: res = "" is_guard = op.is_guard() - if op.descr is not None: - descr = op.descr + if op.getdescr() is not None: + descr = op.getdescr() if is_guard and self.guard_number: index = self.metainterp_sd.cpu.get_fail_descr_number(descr) r = "" % index else: r = self.repr_of_descr(descr) args += ', descr=' + r - if is_guard and op.fail_args is not None: + if is_guard and op.getfailargs() is not None: fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg) - for arg in op.fail_args]) + ']' + for arg in op.getfailargs()]) + ']' else: fail_args = '' debug_print(res + op.getopname() + Modified: pypy/trunk/pypy/jit/metainterp/optimize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimize.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimize.py Wed Sep 22 14:17:16 2010 @@ -43,7 +43,7 @@ finder.find_nodes_bridge(bridge) for old_loop_token in old_loop_tokens: if 
finder.bridge_matches(old_loop_token.specnodes): - bridge.operations[-1].descr = old_loop_token # patch jump target + bridge.operations[-1].setdescr(old_loop_token) # patch jump target optimize_bridge_1(metainterp_sd, bridge) return old_loop_token return None Modified: pypy/trunk/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizefindnode.py Wed Sep 22 14:17:16 2010 @@ -144,7 +144,7 @@ def find_nodes(self, operations): for op in operations: - opnum = op.opnum + opnum = op.getopnum() for value, func in find_nodes_ops: if opnum == value: func(self, op) @@ -154,18 +154,20 @@ def find_nodes_default(self, op): if op.is_always_pure(): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: we can constant-fold - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping - for box in op.args: - self.getnode(box).mark_escaped() + for i in range(op.numargs()): + self.getnode(op.getarg(i)).mark_escaped() def find_nodes_no_escape(self, op): pass # for operations that don't escape their arguments @@ -178,53 +180,53 @@ def find_nodes_NEW_WITH_VTABLE(self, op): instnode = InstanceNode() - box = op.args[0] + box = op.getarg(0) assert isinstance(box, Const) instnode.knownclsbox = box self.nodes[op.result] = instnode def find_nodes_NEW(self, op): instnode = InstanceNode() - instnode.structdescr = op.descr + instnode.structdescr = op.getdescr() self.nodes[op.result] = instnode def find_nodes_NEW_ARRAY(self, op): - lengthbox = op.args[0] + lengthbox = op.getarg(0) lengthbox = self.get_constant_box(lengthbox) if lengthbox is None: return # var-sized arrays are not virtual arraynode = InstanceNode() arraynode.arraysize = lengthbox.getint() - arraynode.arraydescr = op.descr + arraynode.arraydescr = op.getdescr() self.nodes[op.result] = arraynode def find_nodes_ARRAYLEN_GC(self, op): - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.arraydescr is not None: resbox = ConstInt(arraynode.arraysize) self.set_constant_node(op.result, resbox) def find_nodes_GUARD_CLASS(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownclsbox = box def find_nodes_GUARD_VALUE(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownvaluebox = box def find_nodes_SETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) - fieldnode = self.getnode(op.args[1]) + instnode = self.getnode(op.getarg(0)) + fieldnode = self.getnode(op.getarg(1)) if instnode.escaped: fieldnode.mark_escaped() return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is None: instnode.curfields = {} @@ -232,10 
+234,10 @@ instnode.add_escape_dependency(fieldnode) def find_nodes_GETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.escaped: return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is not None and field in instnode.curfields: fieldnode = instnode.curfields[field] @@ -254,13 +256,13 @@ find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC def find_nodes_SETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) - itemnode = self.getnode(op.args[2]) + arraynode = self.getnode(op.getarg(0)) + itemnode = self.getnode(op.getarg(2)) if arraynode.escaped: itemnode.mark_escaped() return # nothing to be gained from tracking the item @@ -270,12 +272,12 @@ arraynode.add_escape_dependency(itemnode) def find_nodes_GETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.escaped: return # nothing to be gained from tracking the item index = indexbox.getint() @@ -298,13 +300,15 @@ def find_nodes_JUMP(self, op): # only set up the 'unique' field of the InstanceNodes; # real handling comes later (build_result_specnodes() for loops). - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).set_unique_nodes() def find_nodes_FINISH(self, op): # only for bridges, and only for the ones that end in a 'return' # or 'raise'; all other cases end with a JUMP. - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).unique = UNIQUE_NO find_nodes_ops = _findall(NodeFinder, 'find_nodes_') @@ -324,7 +328,7 @@ def show(self): from pypy.jit.metainterp.viewnode import viewnodes, view op = self._loop.operations[-1] - assert op.opnum == rop.JUMP + assert op.getopnum() == rop.JUMP exitnodes = [self.getnode(arg) for arg in op.args] viewnodes(self.inputnodes, exitnodes) if hasattr(self._loop.token, "specnodes"): @@ -343,14 +347,14 @@ # Build the list of specnodes based on the result # computed by NodeFinder.find_nodes(). 
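
The hunks above systematically replace direct iteration over op.args with explicit numargs()/getarg(i) calls. As a minimal sketch of that pattern (the helper name iter_args is invented here for illustration and only assumes that op exposes those two accessors):

    def iter_args(op):
        # walk the arguments through the accessor interface instead of op.args
        for i in range(op.numargs()):
            yield op.getarg(i)

    # e.g. collecting the nodes of a jump operation's argument boxes:
    #   exitnodes = [self.getnode(box) for box in iter_args(jump_op)]
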
op = loop.operations[-1] - assert op.opnum == rop.JUMP - assert len(self.inputnodes) == len(op.args) + assert op.getopnum() == rop.JUMP + assert len(self.inputnodes) == op.numargs() while True: self.restart_needed = False specnodes = [] - for i in range(len(op.args)): + for i in range(op.numargs()): inputnode = self.inputnodes[i] - exitnode = self.getnode(op.args[i]) + exitnode = self.getnode(op.getarg(i)) specnodes.append(self.intersect(inputnode, exitnode)) if not self.restart_needed: break @@ -562,9 +566,9 @@ def bridge_matches(self, nextloop_specnodes): jump_op = self.jump_op - assert len(jump_op.args) == len(nextloop_specnodes) + assert jump_op.numargs() == len(nextloop_specnodes) for i in range(len(nextloop_specnodes)): - exitnode = self.getnode(jump_op.args[i]) + exitnode = self.getnode(jump_op.getarg(i)) if not nextloop_specnodes[i].matches_instance_node(exitnode): return False return True Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py Wed Sep 22 14:17:16 2010 @@ -45,7 +45,7 @@ op = self.lazy_setfields.get(descr, None) if op is None: return None - return self.getvalue(op.args[1]) + return self.getvalue(op.getarg(1)) return d.get(value, None) def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): @@ -105,7 +105,7 @@ if op.is_guard(): self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return - opnum = op.opnum + opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or opnum == rop.SETARRAYITEM_GC or opnum == rop.DEBUG_MERGE_POINT): @@ -117,7 +117,7 @@ if opnum == rop.CALL_ASSEMBLER: effectinfo = None else: - effectinfo = op.descr.get_extra_info() + effectinfo = op.getdescr().get_extra_info() if effectinfo is not None: # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -142,7 +142,7 @@ return self.force_all_lazy_setfields() elif op.is_final() or (not we_are_translated() and - op.opnum < 0): # escape() operations + op.getopnum() < 0): # escape() operations self.force_all_lazy_setfields() self.clean_caches() @@ -166,10 +166,11 @@ # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.opnum + opnum = prevop.getopnum() + lastop_args = lastop.getarglist() if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) - and prevop.result not in lastop.args): + and prevop.result not in lastop_args): newoperations[-2] = lastop newoperations[-1] = prevop @@ -189,9 +190,9 @@ # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is stored # into a field of a non-virtual object. 
- value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) assert not value.is_virtual() # it must be a non-virtual - fieldvalue = self.getvalue(op.args[1]) + fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py pendingfields.append((descr, value.box, @@ -202,20 +203,20 @@ def force_lazy_setfield_if_necessary(self, op, value, write=False): try: - op1 = self.lazy_setfields[op.descr] + op1 = self.lazy_setfields[op.getdescr()] except KeyError: if write: - self.lazy_setfields_descrs.append(op.descr) + self.lazy_setfields_descrs.append(op.getdescr()) else: - if self.getvalue(op1.args[0]) is not value: - self.force_lazy_setfield(op.descr) + if self.getvalue(op1.getarg(0)) is not value: + self.force_lazy_setfield(op.getdescr()) def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) self.force_lazy_setfield_if_necessary(op, value) # check if the field was read from another getfield_gc just before # or has been written to recently - fieldvalue = self.read_cached_field(op.descr, value) + fieldvalue = self.read_cached_field(op.getdescr(), value) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return @@ -225,38 +226,38 @@ self.emit_operation(op) # FIXME: These might need constant propagation? # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.descr, value, fieldvalue) + self.cache_field_value(op.getdescr(), value, fieldvalue) def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.descr] = op + self.lazy_setfields[op.getdescr()] = op # remember the result of future reads of the field - self.cache_field_value(op.descr, value, fieldvalue, write=True) + self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - indexvalue = self.getvalue(op.args[1]) - fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) + value = self.getvalue(op.getarg(0)) + indexvalue = self.getvalue(op.getarg(1)) + fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return ###self.optimizer.optimize_default(op) self.emit_operation(op) # FIXME: These might need constant propagation? 
fieldvalue = self.getvalue(op.result) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue) def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[2]) - indexvalue = self.getvalue(op.args[1]) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(2)) + indexvalue = self.getvalue(op.getarg(1)) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Wed Sep 22 14:17:16 2010 @@ -10,7 +10,7 @@ remove redundant guards""" def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -31,7 +31,7 @@ op = self.optimizer.producer[box] except KeyError: return - opnum = op.opnum + opnum = op.getopnum() for value, func in propagate_bounds_ops: if opnum == value: func(self, op) @@ -39,14 +39,14 @@ def optimize_GUARD_TRUE(self, op): self.emit_operation(op) - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) @@ -60,74 +60,74 @@ r.intbound.intersect(IntBound(0,val)) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) def optimize_INT_ADD_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_ADD and remove guard - op.opnum = rop.INT_ADD + op = op.copy_and_change(rop.INT_ADD) self.skip_nextop() - self.optimize_INT_ADD(op) + self.optimize_INT_ADD(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_SUB_OVF(self, op): - v1 = 
self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_SUB and remove guard - op.opnum = rop.INT_SUB + op = op.copy_and_change(rop.INT_SUB) self.skip_nextop() - self.optimize_INT_SUB(op) + self.optimize_INT_SUB(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_MUL and remove guard - op.opnum = rop.INT_MUL + op = op.copy_and_change(rop.INT_MUL) self.skip_nextop() - self.optimize_INT_MUL(op) + self.optimize_INT_MUL(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_LT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_ge(v2.intbound): @@ -136,8 +136,8 @@ self.emit_operation(op) def optimize_INT_GT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_le(v2.intbound): @@ -146,8 +146,8 @@ self.emit_operation(op) def optimize_INT_LE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_le(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): @@ -156,8 +156,8 @@ self.emit_operation(op) def optimize_INT_GE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_ge(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -166,8 +166,8 @@ self.emit_operation(op) def optimize_INT_EQ(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): @@ -176,8 +176,8 @@ self.emit_operation(op) def optimize_INT_NE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -192,115 +192,114 @@ optimize_STRLEN = optimize_ARRAYLEN_GC - def make_int_lt(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_lt(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if 
v2.intbound.make_gt(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - - def make_int_le(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_le(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_le(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_ge(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - def make_int_gt(self, args): - self.make_int_lt([args[1], args[0]]) + def make_int_gt(self, box1, box2): + self.make_int_lt(box2, box1) - def make_int_ge(self, args): - self.make_int_le([args[1], args[0]]) + def make_int_ge(self, box1, box2): + self.make_int_le(box2, box1) def propagate_bounds_INT_LT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) else: - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) else: - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_LE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) else: - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) else: - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_EQ(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_NE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_0): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - 
v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 22 14:17:16 2010 @@ -16,12 +16,12 @@ LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays -LEVEL_CONSTANT = '\x03' +LEVEL_CONSTANT = '\x03' import sys MAXINT = sys.maxint MININT = -sys.maxint - 1 - + class OptValue(object): _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -36,7 +36,7 @@ if isinstance(box, Const): self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT - + def force_box(self): return self.box @@ -171,7 +171,7 @@ def new_const_item(self, arraydescr): return self.optimizer.new_const_item(arraydescr) - + def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op @@ -184,7 +184,7 @@ def setup(self, virtuals): pass - + class Optimizer(Optimization): def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True): @@ -308,7 +308,7 @@ def propagate_forward(self, op): self.producer[op.result] = op - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -323,15 +323,15 @@ self._emit_operation(op) def _emit_operation(self, op): - for i in range(len(op.args)): - arg = op.args[i] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: box = self.values[arg].force_box() - op.args[i] = box + op.setarg(i, box) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.store_final_boxes_in_guard(op) + op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True elif op.returns_bool_result(): @@ -340,7 +340,7 @@ def store_final_boxes_in_guard(self, op): ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) newboxes = modifier.finish(self.values, self.pendingfields) @@ -348,49 +348,54 @@ compile.giveup() descr.store_final_boxes(op, 
newboxes) # - if op.opnum == rop.GUARD_VALUE: - if self.getvalue(op.args[0]) in self.bool_boxes: + if op.getopnum() == rop.GUARD_VALUE: + if self.getvalue(op.getarg(0)) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. # This is done after the operation is emitted to let # store_final_boxes_in_guard set the guard_opnum field of the # descr to the original rop.GUARD_VALUE. - constvalue = op.args[1].getint() + constvalue = op.getarg(1).getint() if constvalue == 0: opnum = rop.GUARD_FALSE elif constvalue == 1: opnum = rop.GUARD_TRUE else: raise AssertionError("uh?") - op.opnum = opnum - op.args = [op.args[0]] + newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) + newop.setfailargs(op.getfailargs()) + return newop else: # a real GUARD_VALUE. Make it use one counter per value. descr.make_a_counter_per_value(op) + return op def make_args_key(self, op): - args = op.args[:] - for i in range(len(args)): - arg = args[i] + args = [] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: - args[i] = self.values[arg].get_key_box() - args.append(ConstInt(op.opnum)) + args.append(self.values[arg].get_key_box()) + else: + args.append(arg) + args.append(ConstInt(op.getopnum())) return args - + def optimize_default(self, op): canfold = op.is_always_pure() is_ovf = op.is_ovf() if is_ovf: nextop = self.loop.operations[self.i + 1] - canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW + canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW if canfold: - for arg in op.args: - if self.get_constant_box(arg) is None: + for i in range(op.numargs()): + if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.make_constant(op.result, resbox.constbox()) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard @@ -399,8 +404,8 @@ # did we do the exact same operation already? 
args = self.make_args_key(op) oldop = self.pure_operations.get(args, None) - if oldop is not None and oldop.descr is op.descr: - assert oldop.opnum == op.opnum + if oldop is not None and oldop.getdescr() is op.getdescr(): + assert oldop.getopnum() == op.getopnum() self.make_equal_to(op.result, self.getvalue(oldop.result)) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Wed Sep 22 14:17:16 2010 @@ -14,7 +14,7 @@ if self.find_rewritable_bool(op, args): return - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -24,7 +24,7 @@ def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): value = self.getvalue(oldop.result) if value.is_constant(): if value.box.same_constant(CONST_1): @@ -39,7 +39,7 @@ def find_rewritable_bool(self, op, args): try: - oldopnum = opboolinvers[op.opnum] + oldopnum = opboolinvers[op.getopnum()] targs = [args[0], args[1], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -47,17 +47,17 @@ pass try: - oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) return True except KeyError: pass try: - oldopnum = opboolinvers[opboolreflex[op.opnum]] + oldopnum = opboolinvers[opboolreflex[op.getopnum()]] targs = [args[1], args[0], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -67,16 +67,16 @@ return False def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) else: self.emit_operation(op) def optimize_INT_OR(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null(): self.make_equal_to(op.result, v2) elif v2.is_null(): @@ -85,20 +85,20 @@ self.emit_operation(op) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v2.is_constant() and v2.box.getint() == 0: self.make_equal_to(op.result, v1) else: self.emit_operation(op) # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) + self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 0 the result is the other side. 
if v1.is_constant() and v1.box.getint() == 0: @@ -109,12 +109,12 @@ self.emit_operation(op) # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) + self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 1 the result is the other side. if v1.is_constant() and v1.box.getint() == 1: @@ -128,18 +128,20 @@ self.emit_operation(op) def optimize_CALL_PURE(self, op): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: constant-fold away - self.make_constant(op.result, op.args[0]) + self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - op.descr)) + args = op.getarglist()[1:] + self.emit_operation(ResOperation(rop.CALL, args, op.result, + op.getdescr())) def optimize_guard(self, op, constbox, emit_operation=True): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_constant(): box = value.box assert isinstance(box, Const) @@ -151,7 +153,7 @@ value.make_constant(constbox) def optimize_GUARD_ISNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_null(): return elif value.is_nonnull(): @@ -160,7 +162,7 @@ value.make_constant(self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_NONNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_nonnull(): return elif value.is_null(): @@ -169,25 +171,25 @@ value.make_nonnull(len(self.optimizer.newoperations) - 1) def optimize_GUARD_VALUE(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) emit_operation = True if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. # replace the original guard with a guard_value old_guard_op = self.optimizer.newoperations[value.last_guard_index] - old_opnum = old_guard_op.opnum - old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. 
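
The rewrites above also stop mutating op.opnum and op.args in place; instead the optimizations build a changed shallow copy with copy_and_change() and store it back wherever the old operation lived. A tiny runnable sketch of that pattern, using an invented stand-in Op class rather than the real ResOperation hierarchy:

    class Op(object):
        # stand-in with just enough interface to show the idea
        def __init__(self, opnum, args):
            self.opnum = opnum
            self.args = list(args)
        def copy_and_change(self, opnum, args=None):
            # shallow copy, meant to be used in place of self
            return Op(opnum, self.args if args is None else args)

    INT_ADD_OVF, INT_ADD = 1, 2
    newoperations = [Op(INT_ADD_OVF, ["i0", "i1"])]

    # e.g. an INT_ADD_OVF whose overflow guard turned out to be unnecessary
    # becomes a plain INT_ADD: the changed copy replaces the old operation
    # where it is stored, and the original object is left untouched
    op = newoperations[0]
    newoperations[0] = op.copy_and_change(INT_ADD)
    assert newoperations[0].opnum == INT_ADD and op.opnum == INT_ADD_OVF
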
- descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(old_guard_op) + descr.make_a_counter_per_value(new_guard_op) emit_operation = False - constbox = op.args[1] + constbox = op.getarg(1) assert isinstance(constbox, Const) self.optimize_guard(op, constbox, emit_operation) @@ -198,8 +200,8 @@ self.optimize_guard(op, CONST_0) def optimize_GUARD_CLASS(self, op): - value = self.getvalue(op.args[0]) - expectedclassbox = op.args[1] + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: @@ -213,15 +215,16 @@ # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. old_guard_op = self.optimizer.newoperations[value.last_guard_index] - if old_guard_op.opnum == rop.GUARD_NONNULL: + if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. - old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS emit_operation = False @@ -239,18 +242,18 @@ self.optimizer.exception_might_have_happened = False def optimize_CALL_LOOPINVARIANT(self, op): - funcvalue = self.getvalue(op.args[0]) + funcvalue = self.getvalue(op.getarg(0)) if not funcvalue.is_constant(): self.emit_operation(op) return - key = make_hashable_int(op.args[0].getint()) + key = make_hashable_int(op.getarg(0).getint()) resvalue = self.optimizer.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this - op.opnum = rop.CALL + op = op.copy_and_change(rop.CALL) self.emit_operation(op) resvalue = self.getvalue(op.result) self.optimizer.loop_invariant_results[key] = resvalue @@ -265,17 +268,17 @@ self.emit_operation(op) def optimize_INT_IS_TRUE(self, op): - if self.getvalue(op.args[0]) in self.optimizer.bool_boxes: - self.make_equal_to(op.result, self.getvalue(op.args[0])) + if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes: + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) return - self._optimize_nullness(op, op.args[0], True) + self._optimize_nullness(op, op.getarg(0), True) def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.args[0], False) + self._optimize_nullness(op, op.getarg(0), False) def _optimize_oois_ooisnot(self, op, expect_isnot): - value0 = self.getvalue(op.args[0]) - value1 = self.getvalue(op.args[1]) + value0 = self.getvalue(op.getarg(0)) + value1 = self.getvalue(op.getarg(1)) if value0.is_virtual(): if value1.is_virtual(): intres = (value0 is value1) ^ expect_isnot @@ -285,9 +288,9 @@ elif value1.is_virtual(): self.make_constant_int(op.result, expect_isnot) elif 
value1.is_null(): - self._optimize_nullness(op, op.args[0], expect_isnot) + self._optimize_nullness(op, op.getarg(0), expect_isnot) elif value0.is_null(): - self._optimize_nullness(op, op.args[1], expect_isnot) + self._optimize_nullness(op, op.getarg(1), expect_isnot) elif value0 is value1: self.make_constant_int(op.result, not expect_isnot) else: @@ -308,10 +311,10 @@ self._optimize_oois_ooisnot(op, False) def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) + checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr()) result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, realclassbox, checkclassbox) Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 14:17:16 2010 @@ -258,7 +258,7 @@ def setup(self, virtuals): if not virtuals: return - + inputargs = self.optimizer.loop.inputargs specnodes = self.optimizer.loop.token.specnodes assert len(inputargs) == len(specnodes) @@ -285,18 +285,18 @@ def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] - target_loop_token = orgop.descr + target_loop_token = orgop.getdescr() assert isinstance(target_loop_token, LoopToken) specnodes = target_loop_token.specnodes - assert len(op.args) == len(specnodes) + assert op.numargs() == len(specnodes) for i in range(len(specnodes)): - value = self.getvalue(op.args[i]) + value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) - op.args = exitargs[:] + op = op.copy_and_change(op.getopnum(), args=exitargs[:]) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) # # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -322,17 +322,17 @@ # typically a PyPy PyFrame, and now is the end of its execution, so # forcing it now does not have catastrophic effects. vrefinfo = self.optimizer.metainterp_sd.virtualref_info - # op.args[1] should really never point to null here + # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, + op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None, descr = vrefinfo.descr_forced) self.optimize_SETFIELD_GC(op1) # - set 'virtual_token' to TOKEN_NONE - args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] + args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)] op1 = ResOperation(rop.SETFIELD_GC, args, None, descr = vrefinfo.descr_virtual_token) self.optimize_SETFIELD_GC(op1) - # Note that in some cases the virtual in op.args[1] has been forced + # Note that in some cases the virtual in op.getarg(1) has been forced # already. This is fine. In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will # trigger a ResumeGuardForcedDescr.handle_async_forcing() which @@ -340,11 +340,11 @@ # was already forced). 
def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): # optimizefindnode should ensure that fieldvalue is found assert isinstance(value, AbstractVirtualValue) - fieldvalue = value.getfield(op.descr, None) + fieldvalue = value.getfield(op.getdescr(), None) assert fieldvalue is not None self.make_equal_to(op.result, fieldvalue) else: @@ -357,36 +357,36 @@ optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) if value.is_virtual(): - value.setfield(op.descr, fieldvalue) + value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_NEW_WITH_VTABLE(self, op): - self.make_virtual(op.args[0], op.result, op) + self.make_virtual(op.getarg(0), op.result, op) def optimize_NEW(self, op): - self.make_vstruct(op.descr, op.result, op) + self.make_vstruct(op.getdescr(), op.result, op) def optimize_NEW_ARRAY(self, op): - sizebox = self.get_constant_box(op.args[0]) + sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): + if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.descr) - self.make_varray(op.descr, sizebox.getint(), op.result, op) + descr=op.getdescr()) + self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: ###self.optimize_default(op) self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): self.make_constant_int(op.result, value.getlength()) else: @@ -395,9 +395,9 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: itemvalue = value.getitem(indexbox.getint()) self.make_equal_to(op.result, itemvalue) @@ -411,22 +411,22 @@ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC def optimize_SETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) return value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) + source_value = self.getvalue(op.getarg(2)) + dest_value = self.getvalue(op.getarg(3)) + source_start_box = self.get_constant_box(op.getarg(4)) + dest_start_box = self.get_constant_box(op.getarg(5)) + length = self.get_constant_box(op.getarg(6)) if (source_value.is_virtual() and source_start_box and dest_start_box 
and length and dest_value.is_virtual()): # XXX optimize the case where dest value is not virtual, @@ -439,13 +439,14 @@ return if length and length.getint() == 0: return # 0-length arraycopy - descr = op.args[0] + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + args = op.getarglist()[1:] + self.emit_operation(ResOperation(rop.CALL, args, op.result, descr)) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/trunk/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/trunk/pypy/jit/metainterp/pyjitpl.py Wed Sep 22 14:17:16 2010 @@ -159,7 +159,7 @@ if got_type == history.INT: self.registers_i[target_index] = resultbox elif got_type == history.REF: - #debug_print(' ->', + #debug_print(' ->', # llmemory.cast_ptr_to_adr(resultbox.getref_base())) self.registers_r[target_index] = resultbox elif got_type == history.FLOAT: @@ -446,7 +446,7 @@ def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) - self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, + self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, sbox, sizebox) abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, sizebox) @@ -1004,7 +1004,7 @@ resumedescr = compile.ResumeGuardDescr(metainterp_sd, original_greenkey) guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) + descr=resumedescr) virtualizable_boxes = None if metainterp.jitdriver_sd.virtualizable_info is not None: virtualizable_boxes = metainterp.virtualizable_boxes @@ -1463,7 +1463,7 @@ resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes) return resbox - def _record_helper_pure(self, opnum, resbox, descr, *argboxes): + def _record_helper_pure(self, opnum, resbox, descr, *argboxes): canfold = self._all_constants(*argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1472,7 +1472,7 @@ resbox = resbox.nonconstbox() # ensure it is a Box return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) - def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): + def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1485,7 +1485,7 @@ assert resbox is None or isinstance(resbox, Box) # record the operation profiler = self.staticdata.profiler - profiler.count_ops(opnum, RECORDED_OPS) + profiler.count_ops(opnum, RECORDED_OPS) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox @@ -1667,7 +1667,7 @@ # Search in current_merge_points for original_boxes with compatible # green keys, representing the beginning of the same loop as the one - # we end now. + # we end now. 
num_green_args = self.jitdriver_sd.num_green_args for j in range(len(self.current_merge_points)-1, -1, -1): @@ -1922,7 +1922,7 @@ vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE call_may_force_op = self.history.operations.pop() - assert call_may_force_op.opnum == rop.CALL_MAY_FORCE + assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) self.history.operations.append(call_may_force_op) @@ -2088,10 +2088,10 @@ """ Patch a CALL into a CALL_PURE. """ op = self.history.operations[-1] - assert op.opnum == rop.CALL + assert op.getopnum() == rop.CALL resbox_as_const = resbox.constbox() - for arg in op.args: - if not isinstance(arg, Const): + for i in range(op.numargs()): + if not isinstance(op.getarg(i), Const): break else: # all-constants: remove the CALL operation now and propagate a @@ -2100,8 +2100,8 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - op.opnum = rop.CALL_PURE - op.args = [resbox_as_const] + op.args + newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist()) + self.history.operations[-1] = newop return resbox def direct_assembler_call(self, targetjitdriver_sd): @@ -2109,10 +2109,11 @@ patching the CALL_MAY_FORCE that occurred just now. """ op = self.history.operations.pop() - assert op.opnum == rop.CALL_MAY_FORCE + assert op.getopnum() == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - greenargs = op.args[1:num_green_args+1] - args = op.args[num_green_args+1:] + arglist = op.getarglist() + greenargs = arglist[1:num_green_args+1] + args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: @@ -2122,9 +2123,7 @@ # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) - op.opnum = rop.CALL_ASSEMBLER - op.args = args - op.descr = token + op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ Modified: pypy/trunk/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/resoperation.py (original) +++ pypy/trunk/pypy/jit/metainterp/resoperation.py Wed Sep 22 14:17:16 2010 @@ -1,42 +1,90 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized -class ResOperation(object): - """The central ResOperation class, representing one operation.""" +def ResOperation(opnum, args, result, descr=None): + cls = opclasses[opnum] + op = cls(result) + op.initarglist(args) + if descr is not None: + assert isinstance(op, ResOpWithDescr) + op.setdescr(descr) + return op + - # for 'guard_*' - fail_args = None +class AbstractResOp(object): + """The central ResOperation class, representing one operation.""" # debug name = "" pc = 0 - def __init__(self, opnum, args, result, descr=None): - make_sure_not_resized(args) - assert isinstance(opnum, int) - self.opnum = opnum - self.args = list(args) - make_sure_not_resized(self.args) - assert not isinstance(result, list) + def __init__(self, result): self.result = result - self.setdescr(descr) + + # methods implemented by each 
concrete class + # ------------------------------------------ + + def getopnum(self): + raise NotImplementedError + + # methods implemented by the arity mixins + # --------------------------------------- + + def initarglist(self, args): + "This is supposed to be called only just after the ResOp has been created" + raise NotImplementedError + + def getarglist(self): + raise NotImplementedError + + def getarg(self, i): + raise NotImplementedError + + def setarg(self, i, box): + raise NotImplementedError + + def numargs(self): + raise NotImplementedError + + + # methods implemented by GuardResOp + # --------------------------------- + + def getfailargs(self): + return None + + def setfailargs(self, fail_args): + raise NotImplementedError + + # methods implemented by ResOpWithDescr + # ------------------------------------- + + def getdescr(self): + return None def setdescr(self, descr): - # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt - # instance provided by the backend holding details about the type - # of the operation. It must inherit from AbstractDescr. The - # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), - # cpu.calldescrof(), and cpu.typedescrof(). - from pypy.jit.metainterp.history import check_descr - check_descr(descr) - self.descr = descr + raise NotImplementedError + + # common methods + # -------------- + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + "shallow copy: the returned operation is meant to be used in place of self" + if args is None: + args = self.getarglist() + if result is None: + result = self.result + if descr is None: + descr = self.getdescr() + newop = ResOperation(opnum, args, result, descr) + return newop def clone(self): - descr = self.descr + args = self.getarglist() + descr = self.getdescr() if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.opnum, self.args, self.result, descr) - op.fail_args = self.fail_args + op = ResOperation(self.getopnum(), args, self.result, descr) if not we_are_translated(): op.name = self.name op.pc = self.pc @@ -55,82 +103,271 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self.descr is None or we_are_translated(): + args = self.getarglist() + descr = self.getdescr() + if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args])) + ', '.join([str(a) for a in args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args]), self.descr) + ', '.join([str(a) for a in args]), descr) def getopname(self): try: - return opname[self.opnum].lower() + return opname[self.getopnum()].lower() except KeyError: - return '<%d>' % self.opnum + return '<%d>' % self.getopnum() def is_guard(self): - return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST + return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST + return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST def is_guard_exception(self): - return (self.opnum == rop.GUARD_EXCEPTION or - self.opnum == rop.GUARD_NO_EXCEPTION) + return (self.getopnum() == rop.GUARD_EXCEPTION or + self.getopnum() == rop.GUARD_NO_EXCEPTION) def is_guard_overflow(self): - return (self.opnum == rop.GUARD_OVERFLOW or - self.opnum == rop.GUARD_NO_OVERFLOW) + return (self.getopnum() == rop.GUARD_OVERFLOW or + self.getopnum() == 
rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST + return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST + return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST def can_raise(self): - return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST + return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST def is_ovf(self): - return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST + return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST def is_comparison(self): return self.is_always_pure() and self.returns_bool_result() def is_final(self): - return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST + return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST def returns_bool_result(self): - opnum = self.opnum + opnum = self.getopnum() if we_are_translated(): assert opnum >= 0 elif opnum < 0: return False # for tests return opboolresult[opnum] + +# =================== +# Top of the hierachy +# =================== + +class PlainResOp(AbstractResOp): + pass + +class ResOpWithDescr(AbstractResOp): + + _descr = None + + def getdescr(self): + return self._descr + + def setdescr(self, descr): + # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt + # instance provided by the backend holding details about the type + # of the operation. It must inherit from AbstractDescr. The + # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), + # cpu.calldescrof(), and cpu.typedescrof(). + from pypy.jit.metainterp.history import check_descr + check_descr(descr) + self._descr = descr + +class GuardResOp(ResOpWithDescr): + + _fail_args = None + + def getfailargs(self): + return self._fail_args + + def setfailargs(self, fail_args): + self._fail_args = fail_args + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) + newop.setfailargs(self.getfailargs()) + return newop + + def clone(self): + newop = AbstractResOp.clone(self) + newop.setfailargs(self.getfailargs()) + return newop + + +# ============ +# arity mixins +# ============ + +class NullaryOp(object): + _mixin_ = True + + def initarglist(self, args): + assert len(args) == 0 + + def getarglist(self): + return [] + + def numargs(self): + return 0 + + def getarg(self, i): + raise IndexError + + def setarg(self, i, box): + raise IndexError + + +class UnaryOp(object): + _mixin_ = True + _arg0 = None + + def initarglist(self, args): + assert len(args) == 1 + self._arg0, = args + + def getarglist(self): + return [self._arg0] + + def numargs(self): + return 1 + + def getarg(self, i): + if i == 0: + return self._arg0 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + else: + raise IndexError + + +class BinaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + + def initarglist(self, args): + assert len(args) == 2 + self._arg0, self._arg1 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 2 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + else: + raise IndexError + + def getarglist(self): + return [self._arg0, self._arg1] + + +class TernaryOp(object): + 
_mixin_ = True + _arg0 = None + _arg1 = None + _arg2 = None + + def initarglist(self, args): + assert len(args) == 3 + self._arg0, self._arg1, self._arg2 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 3 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + elif i == 2: + return self._arg2 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + elif i == 2: + self._arg2 = box + else: + raise IndexError + +class N_aryOp(object): + _mixin_ = True + _args = None + + def initarglist(self, args): + self._args = args + + def getarglist(self): + return self._args + + def numargs(self): + return len(self._args) + + def getarg(self, i): + return self._args[i] + + def setarg(self, i, box): + self._args[i] = box + + # ____________________________________________________________ _oplist = [ '_FINAL_FIRST', - 'JUMP', - 'FINISH', + 'JUMP/*d', + 'FINISH/*d', '_FINAL_LAST', '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', - 'GUARD_TRUE', - 'GUARD_FALSE', - 'GUARD_VALUE', - 'GUARD_CLASS', - 'GUARD_NONNULL', - 'GUARD_ISNULL', - 'GUARD_NONNULL_CLASS', + 'GUARD_TRUE/1d', + 'GUARD_FALSE/1d', + 'GUARD_VALUE/2d', + 'GUARD_CLASS/2d', + 'GUARD_NONNULL/1d', + 'GUARD_ISNULL/1d', + 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION', - 'GUARD_EXCEPTION', - 'GUARD_NO_OVERFLOW', - 'GUARD_OVERFLOW', - 'GUARD_NOT_FORCED', + 'GUARD_NO_EXCEPTION/0d', + 'GUARD_EXCEPTION/1d', + 'GUARD_NO_OVERFLOW/0d', + 'GUARD_OVERFLOW/0d', + 'GUARD_NOT_FORCED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -218,19 +455,19 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', 'NEWUNICODE/1', - #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) + #'RUNTIMENEW/1', # ootype operation + 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend '_CANRAISE_FIRST', # ----- start of can_raise operations ----- - 'CALL', - 'CALL_ASSEMBLER', - 'CALL_MAY_FORCE', - 'CALL_LOOPINVARIANT', + 'CALL/*d', + 'CALL_ASSEMBLER/*d', + 'CALL_MAY_FORCE/*d', + 'CALL_LOOPINVARIANT/*d', #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation - 'CALL_PURE', # removed before it's passed to the backend + 'CALL_PURE/*d', # removed before it's passed to the backend # CALL_PURE(result, func, arg_1,..,arg_n) '_CANRAISE_LAST', # ----- end of can_raise operations ----- @@ -247,6 +484,7 @@ class rop(object): pass +opclasses = [] # mapping numbers to the concrete ResOp class opname = {} # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -261,16 +499,62 @@ name, arity = name.split('/') withdescr = 'd' in arity boolresult = 'b' in arity - arity = int(arity.rstrip('db')) + arity = arity.rstrip('db') + if arity == '*': + arity = -1 + else: + arity = int(arity) else: arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): opname[i] = name + cls = create_class_for_op(name, i, arity, withdescr) + else: + cls = None + opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) opboolresult.append(boolresult) - assert 
len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + +def get_base_class(mixin, base): + try: + return get_base_class.cache[(mixin, base)] + except KeyError: + arity_name = mixin.__name__[:-2] # remove the trailing "Op" + name = arity_name + base.__name__ # something like BinaryPlainResOp + bases = (mixin, base) + cls = type(name, bases, {}) + get_base_class.cache[(mixin, base)] = cls + return cls +get_base_class.cache = {} + +def create_class_for_op(name, opnum, arity, withdescr): + arity2mixin = { + 0: NullaryOp, + 1: UnaryOp, + 2: BinaryOp, + 3: TernaryOp + } + + is_guard = name.startswith('GUARD') + if is_guard: + assert withdescr + baseclass = GuardResOp + elif withdescr: + baseclass = ResOpWithDescr + else: + baseclass = PlainResOp + mixin = arity2mixin.get(arity, N_aryOp) + + def getopnum(self): + return opnum + + cls_name = '%s_OP' % name + bases = (get_base_class(mixin, baseclass),) + dic = {'getopnum': getopnum} + return type(cls_name, bases, dic) setup(__name__ == '__main__') # print out the table when run directly del _oplist Modified: pypy/trunk/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/trunk/pypy/jit/metainterp/simple_optimize.py Wed Sep 22 14:17:16 2010 @@ -11,15 +11,17 @@ from pypy.jit.metainterp.history import AbstractDescr # change ARRAYCOPY to call, so we don't have to pass around # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. - if op.opnum == rop.ARRAYCOPY: - descr = op.args[0] + if op.getopnum() == rop.ARRAYCOPY: + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr) - elif op.opnum == rop.CALL_PURE: - op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) - elif op.opnum == rop.VIRTUAL_REF: - op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) - elif op.opnum == rop.VIRTUAL_REF_FINISH: + args = op.getarglist()[1:] + op = ResOperation(rop.CALL, args, op.result, descr=descr) + elif op.getopnum() == rop.CALL_PURE: + args = op.getarglist()[1:] + op = ResOperation(rop.CALL, args, op.result, op.getdescr()) + elif op.getopnum() == rop.VIRTUAL_REF: + op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) + elif op.getopnum() == rop.VIRTUAL_REF_FINISH: return [] return [op] @@ -36,7 +38,7 @@ newoperations = [] for op in loop.operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, memo) newboxes = modifier.finish(EMPTY_VALUES) Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/oparser.py Wed Sep 22 14:17:16 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ LoopToken -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory @@ -16,17 +16,29 @@ class 
ParseError(Exception): pass - class Boxes(object): pass +class ESCAPE_OP(N_aryOp, ResOpWithDescr): + + OPNUM = -123 + + def __init__(self, opnum, args, result, descr=None): + assert opnum == self.OPNUM + self.result = result + self.initarglist(args) + self.setdescr(descr) + + def getopnum(self): + return self.OPNUM + class ExtendedTreeLoop(TreeLoop): def getboxes(self): def opboxes(operations): for op in operations: yield op.result - for box in op.args: + for box in op.getarglist(): yield box def allboxes(): for box in self.inputargs: @@ -171,7 +183,7 @@ opnum = getattr(rop, opname.upper()) except AttributeError: if opname == 'escape': - opnum = -123 + opnum = ESCAPE_OP.OPNUM else: raise ParseError("unknown op: %s" % opname) endnum = line.rfind(')') @@ -228,6 +240,12 @@ descr = self.looptoken return opnum, args, descr, fail_args + def create_op(self, opnum, args, result, descr): + if opnum == ESCAPE_OP.OPNUM: + return ESCAPE_OP(opnum, args, result, descr) + else: + return ResOperation(opnum, args, result, descr) + def parse_result_op(self, line): res, op = line.split("=", 1) res = res.strip() @@ -237,14 +255,16 @@ raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) self.vars[res] = rvar - res = ResOperation(opnum, args, rvar, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, rvar, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_op_no_result(self, line): opnum, args, descr, fail_args = self.parse_op(line) - res = ResOperation(opnum, args, None, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, None, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_next_op(self, line): Modified: pypy/trunk/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_basic.py Wed Sep 22 14:17:16 2010 @@ -296,7 +296,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) Modified: pypy/trunk/pypy/jit/metainterp/test/test_logger.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_logger.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_logger.py Wed Sep 22 14:17:16 2010 @@ -100,8 +100,8 @@ debug_merge_point("info") ''' loop, oloop = self.reparse(inp, check_equal=False) - assert loop.operations[0].args[0]._get_str() == 'info' - assert oloop.operations[0].args[0]._get_str() == 'info' + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert oloop.operations[0].getarg(0)._get_str() == 'info' def test_floats(self): inp = ''' Modified: pypy/trunk/pypy/jit/metainterp/test/test_loop.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_loop.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_loop.py Wed Sep 22 14:17:16 2010 @@ -178,7 +178,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 2 # x, y (in some order) assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], history.BoxInt) 
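The heart of the new resoperation.py is that the opcode table itself drives class creation: each 'NAME/arity' entry (with an optional 'd' for "takes a descr" and 'b' for "returns a bool result") produces one concrete class, assembled from an arity mixin plus a base class, and the (mixin, base) combinations are cached so they are built only once. A compressed, self-contained sketch of that machinery, using a toy table and illustrative names:

    class AbstractOp(object):
        def getopnum(self):
            raise NotImplementedError

    class OpWithDescr(AbstractOp):
        _descr = None

    class UnaryMixin(object):
        def initarglist(self, args):
            assert len(args) == 1
            self._arg0, = args
        def getarglist(self):
            return [self._arg0]

    class BinaryMixin(object):
        def initarglist(self, args):
            assert len(args) == 2
            self._arg0, self._arg1 = args
        def getarglist(self):
            return [self._arg0, self._arg1]

    _base_cache = {}

    def get_base_class(mixin, base):
        # cache the combination, so e.g. all binary ops that take a descr
        # share a single intermediate class
        key = (mixin, base)
        if key not in _base_cache:
            _base_cache[key] = type(mixin.__name__ + base.__name__,
                                    (mixin, base), {})
        return _base_cache[key]

    def create_class_for_op(name, opnum, arity, withdescr):
        mixin = {1: UnaryMixin, 2: BinaryMixin}[arity]
        base = OpWithDescr if withdescr else AbstractOp
        body = {'getopnum': lambda self, _n=opnum: _n}
        return type('%s_OP' % name, (get_base_class(mixin, base),), body)

    toy_table = ['INT_NEG/1', 'INT_ADD/2', 'SETFIELD_GC/2d']
    opclasses = []
    for i, spec in enumerate(toy_table):
        name, arity = spec.split('/')
        withdescr = 'd' in arity
        opclasses.append(create_class_for_op(name, i,
                                             int(arity.rstrip('db')),
                                             withdescr))

    add = opclasses[1]()
    add.initarglist(['a', 'b'])
    assert add.getopnum() == 1 and add.getarglist() == ['a', 'b']

One wrinkle in the diff above: the BinaryOp class body defines getarglist twice, first with an apparently leftover three-slot version and then with the expected two-slot one; since the later definition in a class body wins, instances still return exactly two arguments.
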
Modified: pypy/trunk/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_oparser.py Wed Sep 22 14:17:16 2010 @@ -16,10 +16,10 @@ """ loop = parse(x) assert len(loop.operations) == 3 - assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, + assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 - assert loop.operations[-1].descr + assert loop.operations[-1].getdescr() def test_const_ptr_subops(): x = """ @@ -30,8 +30,8 @@ vtable = lltype.nullptr(S) loop = parse(x, None, locals()) assert len(loop.operations) == 1 - assert loop.operations[0].descr - assert loop.operations[0].fail_args == [] + assert loop.operations[0].getdescr() + assert loop.operations[0].getfailargs() == [] def test_descr(): class Xyz(AbstractDescr): @@ -43,7 +43,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_after_fail(): x = """ @@ -64,7 +64,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_boxname(): x = """ @@ -111,7 +111,7 @@ TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) loop = parse(x, None, {'func_ptr' : NULL}) - assert loop.operations[0].args[0].value == NULL + assert loop.operations[0].getarg(0).value == NULL def test_jump_target(): x = ''' @@ -119,7 +119,7 @@ jump() ''' loop = parse(x) - assert loop.operations[0].descr is loop.token + assert loop.operations[0].getdescr() is loop.token def test_jump_target_other(): looptoken = LoopToken() @@ -128,7 +128,7 @@ jump(descr=looptoken) ''' loop = parse(x, namespace=locals()) - assert loop.operations[0].descr is looptoken + assert loop.operations[0].getdescr() is looptoken def test_floats(): x = ''' @@ -136,7 +136,7 @@ f1 = float_add(f0, 3.5) ''' loop = parse(x) - assert isinstance(loop.operations[0].args[0], BoxFloat) + assert isinstance(loop.operations[0].getarg(0), BoxFloat) def test_debug_merge_point(): x = ''' @@ -147,10 +147,10 @@ debug_merge_point('(stuff) #1') ''' loop = parse(x) - assert loop.operations[0].args[0]._get_str() == 'info' - assert loop.operations[1].args[0]._get_str() == 'info' - assert loop.operations[2].args[0]._get_str() == " info" - assert loop.operations[3].args[0]._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert loop.operations[1].getarg(0)._get_str() == 'info' + assert loop.operations[2].getarg(0)._get_str() == " info" + assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1" def test_descr_with_obj_print(): Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 22 14:17:16 2010 @@ -42,7 +42,7 @@ opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), None) fdescr = ResumeGuardDescr(None, None) - op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr) + op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr) # setup rd data fi0 = resume.FrameInfo(None, "code0", 11) fdescr.rd_frame_info_list = resume.FrameInfo(fi0, 
"code1", 33) @@ -50,11 +50,11 @@ fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1]) # opt.store_final_boxes_in_guard(op) - if op.fail_args == [b0, b1]: + if op.getfailargs() == [b0, b1]: assert fdescr.rd_numb.nums == [tag(1, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)] else: - assert op.fail_args == [b1, b0] + assert op.getfailargs() == [b1, b0] assert fdescr.rd_numb.nums == [tag(0, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)] assert fdescr.rd_virtuals is None @@ -140,24 +140,26 @@ print '%-39s| %s' % (txt1[:39], txt2[:39]) txt1 = txt1[39:] txt2 = txt2[39:] - assert op1.opnum == op2.opnum - assert len(op1.args) == len(op2.args) - for x, y in zip(op1.args, op2.args): + assert op1.getopnum() == op2.getopnum() + assert op1.numargs() == op2.numargs() + for i in range(op1.numargs()): + x = op1.getarg(i) + y = op2.getarg(i) assert x == remap.get(y, y) if op2.result in remap: assert op1.result == remap[op2.result] else: remap[op2.result] = op1.result - if op1.opnum != rop.JUMP: # xxx obscure - assert op1.descr == op2.descr - if op1.fail_args or op2.fail_args: - assert len(op1.fail_args) == len(op2.fail_args) + if op1.getopnum() != rop.JUMP: # xxx obscure + assert op1.getdescr() == op2.getdescr() + if op1.getfailargs() or op2.getfailargs(): + assert len(op1.getfailargs()) == len(op2.getfailargs()) if strict_fail_args: - for x, y in zip(op1.fail_args, op2.fail_args): + for x, y in zip(op1.getfailargs(), op2.getfailargs()): assert x == remap.get(y, y) else: - fail_args1 = set(op1.fail_args) - fail_args2 = set([remap.get(y, y) for y in op2.fail_args]) + fail_args1 = set(op1.getfailargs()) + fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()]) assert fail_args1 == fail_args2 assert len(oplist1) == len(oplist2) print '-'*57 @@ -209,7 +211,7 @@ self.metainterp_sd = metainterp_sd self.original_greenkey = original_greenkey def store_final_boxes(self, op, boxes): - op.fail_args = boxes + op.setfailargs(boxes) def __eq__(self, other): return type(self) is type(other) # xxx obscure @@ -2361,8 +2363,8 @@ from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader from pypy.jit.metainterp.test.test_resume import MyMetaInterp guard_op, = [op for op in self.loop.operations if op.is_guard()] - fail_args = guard_op.fail_args - fdescr = guard_op.descr + fail_args = guard_op.getfailargs() + fdescr = guard_op.getdescr() assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) Modified: pypy/trunk/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_recursive.py Wed Sep 22 14:17:16 2010 @@ -319,8 +319,8 @@ for loop in get_stats().loops: assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode for op in loop.operations: - if op.is_guard() and hasattr(op.descr, '_debug_suboperations'): - assert len(op.descr._debug_suboperations) <= length + 5 + if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'): + assert len(op.getdescr()._debug_suboperations) <= length + 5 def test_inline_trace_limit(self): myjitdriver = JitDriver(greens=[], reds=['n']) Modified: pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py (original) +++ 
pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py Wed Sep 22 14:17:16 2010 @@ -71,11 +71,11 @@ # ops = self.metainterp.staticdata.stats.loops[0].operations [guard_op] = [op for op in ops - if op.opnum == rop.GUARD_NOT_FORCED] - bxs1 = [box for box in guard_op.fail_args + if op.getopnum() == rop.GUARD_NOT_FORCED] + bxs1 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('.X')] assert len(bxs1) == 1 - bxs2 = [box for box in guard_op.fail_args + bxs2 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('JitVirtualRef')] assert len(bxs2) == 1 JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF @@ -84,11 +84,11 @@ # try reloading from blackhole.py's point of view from pypy.jit.metainterp.resume import ResumeDataDirectReader cpu = self.metainterp.cpu - cpu.get_latest_value_count = lambda : len(guard_op.fail_args) - cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint() - cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base() + cpu.get_latest_value_count = lambda : len(guard_op.getfailargs()) + cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint() + cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base() cpu.clear_latest_values = lambda count: None - resumereader = ResumeDataDirectReader(cpu, guard_op.descr) + resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr()) vrefinfo = self.metainterp.staticdata.virtualref_info lst = [] vrefinfo.continue_tracing = lambda vref, virtual: \ @@ -100,7 +100,7 @@ lst[0][0]) # assert correct type # # try reloading from pyjitpl's point of view - self.metainterp.rebuild_state_after_failure(guard_op.descr) + self.metainterp.rebuild_state_after_failure(guard_op.getdescr()) assert len(self.metainterp.framestack) == 1 assert len(self.metainterp.virtualref_boxes) == 2 assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value Modified: pypy/trunk/pypy/jit/tool/showstats.py ============================================================================== --- pypy/trunk/pypy/jit/tool/showstats.py (original) +++ pypy/trunk/pypy/jit/tool/showstats.py Wed Sep 22 14:17:16 2010 @@ -17,7 +17,7 @@ num_dmp = 0 num_guards = 0 for op in loop.operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: num_dmp += 1 else: num_ops += 1 From antocuni at codespeak.net Wed Sep 22 14:18:11 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 22 Sep 2010 14:18:11 +0200 (CEST) Subject: [pypy-svn] r77266 - pypy/branch/resoperation-refactoring Message-ID: <20100922121811.C5940282BFB@codespeak.net> Author: antocuni Date: Wed Sep 22 14:18:10 2010 New Revision: 77266 Removed: pypy/branch/resoperation-refactoring/ Log: kill merged branch From fijal at codespeak.net Wed Sep 22 14:27:47 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 14:27:47 +0200 (CEST) Subject: [pypy-svn] r77267 - pypy/trunk/pypy/jit/backend/llsupport Message-ID: <20100922122747.F0A36282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 14:27:46 2010 New Revision: 77267 Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py Log: Configure boehm only once. 
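The trick is plain class-level memoization: run the expensive probe the first time it is needed, stash the result as a class attribute, and let every later instance pick it up from there; because the attribute is only set after the probe succeeds, a failing probe is not cached. A small stand-alone sketch, with the real Boehm configuration replaced by a placeholder probe:

    class BoehmGCDescrSketch(object):
        # illustrative name; the real class calls this from __init__
        @classmethod
        def configure_once(cls, probe):
            if hasattr(cls, '_malloc_fn_ptr'):
                return cls._malloc_fn_ptr
            result = probe()          # may raise; then nothing is cached
            cls._malloc_fn_ptr = result
            return result

    calls = []
    def fake_probe():
        calls.append(1)
        return 'GC_malloc pointer'

    first = BoehmGCDescrSketch.configure_once(fake_probe)
    second = BoehmGCDescrSketch.configure_once(fake_probe)
    assert first == second and len(calls) == 1   # probed once per process
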
This gives means that we don't have to do it for each test (note that we don't cache failures on hd) Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Wed Sep 22 14:27:46 2010 @@ -41,9 +41,12 @@ moving_gc = False gcrootmap = None - def __init__(self, gcdescr, translator, rtyper): - GcLLDescription.__init__(self, gcdescr, translator, rtyper) - # grab a pointer to the Boehm 'malloc' function + @classmethod + def configure_boehm_once(cls): + """ Configure boehm only once, since we don't cache failures + """ + if hasattr(cls, 'malloc_fn_ptr'): + return cls.malloc_fn_ptr from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() @@ -59,13 +62,20 @@ GC_MALLOC = "GC_local_malloc" else: GC_MALLOC = "GC_malloc" - malloc_fn_ptr = rffi.llexternal(GC_MALLOC, [lltype.Signed], # size_t, but good enough llmemory.GCREF, compilation_info=compilation_info, sandboxsafe=True, _nowrapper=True) + cls.malloc_fn_ptr = malloc_fn_ptr + cls.compilation_info = compilation_info + return malloc_fn_ptr + + def __init__(self, gcdescr, translator, rtyper): + GcLLDescription.__init__(self, gcdescr, translator, rtyper) + # grab a pointer to the Boehm 'malloc' function + malloc_fn_ptr = self.configure_boehm_once() self.funcptr_for_new = malloc_fn_ptr # on some platform GC_init is required before any other @@ -73,7 +83,7 @@ # XXX move this to tests init_fn_ptr = rffi.llexternal("GC_init", [], lltype.Void, - compilation_info=compilation_info, + compilation_info=self.compilation_info, sandboxsafe=True, _nowrapper=True) From fijal at codespeak.net Wed Sep 22 14:28:34 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 14:28:34 +0200 (CEST) Subject: [pypy-svn] r77268 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100922122834.69588282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 14:28:33 2010 New Revision: 77268 Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/codebuf.py Log: * call valgrind_invalidated for each done() call, remove it from assembler.py * add docstrings * kill dead code Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Wed Sep 22 14:28:33 2010 @@ -419,7 +419,6 @@ mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) mc.JMP_r(X86_64_SCRATCH_REG.value) - mc.valgrind_invalidated() mc.done() def _inject_debugging_code(self, operations): @@ -475,7 +474,6 @@ # align, e.g. 
for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP mc.writeimm32(-WORD * aligned_words) - mc.valgrind_invalidated() mc.done() def _call_header(self): @@ -598,7 +596,6 @@ target = newlooptoken._x86_direct_bootstrap_code mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) mc.JMP(imm(target)) - mc.valgrind_invalidated() mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): Modified: pypy/trunk/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/trunk/pypy/jit/backend/x86/codebuf.py Wed Sep 22 14:28:33 2010 @@ -29,6 +29,9 @@ self._pos = 0 def overwrite(self, pos, listofchars): + """ Overwrite a specified position with a given list of chars + (position is relative + """ make_sure_not_resized(listofchars) assert pos + len(listofchars) <= self._size for c in listofchars: @@ -49,35 +52,38 @@ self.writechar(chr(n)) def get_relative_pos(self): + """ Current position, relative to code start + """ return self._pos def tell(self): + """ Tell the current address at machine code block + """ baseaddr = rffi.cast(lltype.Signed, self._data) return baseaddr + self._pos - def seekback(self, count): - pos = self._pos - count - self._pos = pos - self._last_dump_start = pos - def done(self): - # normally, no special action is needed here + """ Called at the end of writing of each piece of machine code. + Even though this function doesn't do much, it's extremely important + to call this for all tools to work, like valgrind or machine code + dumping + """ + self.valgrind_invalidated() if machine_code_dumper.enabled: machine_code_dumper.dump_range(self, self._last_dump_start, self._pos) self._last_dump_start = self._pos - def redone(self, frm, to): - if machine_code_dumper.enabled: - baseaddr = rffi.cast(lltype.Signed, self._data) - machine_code_dumper.dump_range(self, frm - baseaddr, to - baseaddr) - def log(self, msg): + """ Insert information into machine code dumper, if enabled + """ if machine_code_dumper.enabled: machine_code_dumper.dump(self, 'LOG', self._pos, msg) def valgrind_invalidated(self): - # mark the range of the InMemoryCodeBuilder as invalidated for Valgrind + """ Mark the range of the InMemoryCodeBuilder as invalidated + for Valgrind + """ from pypy.jit.backend.x86 import valgrind valgrind.discard_translations(self._data, self._size) From fijal at codespeak.net Wed Sep 22 14:29:26 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 14:29:26 +0200 (CEST) Subject: [pypy-svn] r77269 - pypy/trunk/lib-python/modified-2.5.2 Message-ID: <20100922122926.E3F6036C53A@codespeak.net> Author: fijal Date: Wed Sep 22 14:29:25 2010 New Revision: 77269 Modified: pypy/trunk/lib-python/modified-2.5.2/opcode.py Log: Improve our own opcode reporting by dis Modified: pypy/trunk/lib-python/modified-2.5.2/opcode.py ============================================================================== --- pypy/trunk/lib-python/modified-2.5.2/opcode.py (original) +++ pypy/trunk/lib-python/modified-2.5.2/opcode.py Wed Sep 22 14:29:25 2010 @@ -185,6 +185,7 @@ # pypy modification, experimental bytecode def_op('CALL_LIKELY_BUILTIN', 144) # #args + (#kwargs << 8) def_op('LOOKUP_METHOD', 145) # Index in name list +hasname.append(145) def_op('CALL_METHOD', 146) # #args not including 'self' From arigo at codespeak.net Wed Sep 22 15:23:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 15:23:33 +0200 (CEST) Subject: 
[pypy-svn] r77270 - in pypy/branch/jit-str/pypy/jit/codewriter: . test Message-ID: <20100922132333.A12B6282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 15:23:31 2010 New Revision: 77270 Modified: pypy/branch/jit-str/pypy/jit/codewriter/call.py pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/support.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py Log: Start refactoring ARRAYCOPY and the string operations. The idea is to reach a point in which we can cleanly have a call operation remain as a call, unless some special case applies in optimizeopt. Modified: pypy/branch/jit-str/pypy/jit/codewriter/call.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/call.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/call.py Wed Sep 22 15:23:31 2010 @@ -185,7 +185,7 @@ FUNC.RESULT) return (fnaddr, calldescr) - def getcalldescr(self, op): + def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -226,7 +226,8 @@ extraeffect = EffectInfo.EF_CANNOT_RAISE # effectinfo = effectinfo_from_writeanalyze( - self.readwrite_analyzer.analyze(op), self.cpu, extraeffect) + self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, + oopspecindex) # if pure or loopinvariant: assert effectinfo is not None Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Wed Sep 22 15:23:31 2010 @@ -15,13 +15,27 @@ EF_LOOPINVARIANT = 3 #special: call it only once per loop EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 4 #can raise and force virtualizables + # the 'oopspecindex' field is one of the following values: + OS_NONE = 0 # normal case, no oopspec + OS_ARRAYCOPY = 1 # "list.ll_arraycopy" + OS_STR_CONCAT = 2 # "stroruni.concat" + OS_STR_SLICE_STARTONLY = 3 # "stroruni.slice_startonly" + OS_STR_SLICE_STARTSTOP = 4 # "stroruni.slice_startstop" + OS_STR_SLICE_MINUSONE = 5 # "stroruni.slice_minusone" + OS_UNI_CONCAT = 82 # "stroruni.concat" (+80) + OS_UNI_SLICE_STARTONLY = 83 # "stroruni.slice_startonly" (+80) + OS_UNI_SLICE_STARTSTOP = 84 # "stroruni.slice_startstop" (+80) + OS_UNI_SLICE_MINUSONE = 85 # "stroruni.slice_minusone" (+80) + def __new__(cls, readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, - extraeffect=EF_CAN_RAISE): + extraeffect=EF_CAN_RAISE, + oopspecindex=OS_NONE): key = (frozenset(readonly_descrs_fields), frozenset(write_descrs_fields), frozenset(write_descrs_arrays), - extraeffect) + extraeffect, + oopspecindex) if key in cls._cache: return cls._cache[key] result = object.__new__(cls) @@ -29,6 +43,7 @@ result.write_descrs_fields = write_descrs_fields result.write_descrs_arrays = write_descrs_arrays result.extraeffect = extraeffect + result.oopspecindex = oopspecindex cls._cache[key] = result return result @@ -36,7 +51,8 @@ return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE def effectinfo_from_writeanalyze(effects, cpu, - extraeffect=EffectInfo.EF_CAN_RAISE): + extraeffect=EffectInfo.EF_CAN_RAISE, + 
oopspecindex=EffectInfo.OS_NONE): from pypy.translator.backendopt.writeanalyze import top_set if effects is top_set: return None Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Wed Sep 22 15:23:31 2010 @@ -6,6 +6,7 @@ from pypy.objspace.flow.model import Block, Link, c_last_exception from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem from pypy.rlib import objectmodel @@ -310,6 +311,8 @@ # dispatch to various implementations depending on the oopspec_name if oopspec_name.startswith('list.') or oopspec_name == 'newlist': prepare = self._handle_list_call + elif oopspec_name.startswith('stroruni.'): + prepare = self._handle_stroruni_call elif oopspec_name.startswith('virtual_ref'): prepare = self._handle_virtual_ref_call else: @@ -982,10 +985,7 @@ return extraop + [op] def do_fixed_list_ll_arraycopy(self, op, args, arraydescr): - calldescr = self.callcontrol.getcalldescr(op) - return SpaceOperation('arraycopy', - [calldescr, op.args[0]] + args + [arraydescr], - op.result) + return self._handle_oopspec_call(op, args, EffectInfo.OS_ARRAYCOPY) # ---------- resizable lists ---------- @@ -1023,6 +1023,30 @@ [args[0], lengthdescr], op.result) # ---------- + # Strings and Unicodes. + + def _handle_oopspec_call(self, op, args, oopspecindex): + cc = self.callcontrol + calldescr = cc.getcalldescr(op, oopspecindex=oopspecindex) + return SpaceOperation('oopspec_call', + [calldescr, op.args[0]] + args, + op.result) + + def _handle_stroruni_call(self, op, oopspec_name, args): + dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, + "stroruni.slice_startonly": EffectInfo.OS_STR_SLICE_STARTONLY, + "stroruni.slice_startstop": EffectInfo.OS_STR_SLICE_STARTSTOP, + "stroruni.slice_minusone": EffectInfo.OS_STR_SLICE_MINUSONE} + base = dict[oopspec_name] + if args[0].concretetype.TO == rstr.STR: + offset = 0 + elif args[0].concretetype.TO == rstr.UNICODE: + offset = 80 + else: + assert 0, "args[0].concretetype must be STR or UNICODE" + return self._handle_oopspec_call(op, args, base + offset) + + # ---------- # VirtualRefs. 
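The string and unicode oopspecs are folded into the same mechanism as ll_arraycopy: the call keeps its ordinary calling convention, and the only extra information is a small integer index recorded in the call descr's effect info, with the unicode variants reusing the string indices shifted by a fixed offset of 80. A tiny sketch of that name-to-index mapping, limited to the constants shown above:

    OS_STR_CONCAT = 2
    OS_STR_SLICE_STARTONLY = 3
    UNI_OFFSET = 80            # so OS_UNI_CONCAT == 82, and so on

    STRORUNI_INDEX = {
        'stroruni.concat':          OS_STR_CONCAT,
        'stroruni.slice_startonly': OS_STR_SLICE_STARTONLY,
    }

    def oopspec_index(oopspec_name, arg_is_unicode):
        base = STRORUNI_INDEX[oopspec_name]
        if arg_is_unicode:
            return base + UNI_OFFSET
        return base

    assert oopspec_index('stroruni.concat', False) == 2
    assert oopspec_index('stroruni.concat', True) == 82

The optimizer can then dispatch on that index (as optimize_CALL does in the follow-up commit below) instead of needing a dedicated ResOperation for every special-cased call.
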
def _handle_virtual_ref_call(self, op, oopspec_name, args): Modified: pypy/branch/jit-str/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/support.py Wed Sep 22 15:23:31 2010 @@ -277,11 +277,6 @@ _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode - _ll_2_stroruni_concat = ll_rstr.LLHelpers.ll_strconcat - _ll_2_stroruni_slice_startonly = ll_rstr.LLHelpers.ll_stringslice_startonly - _ll_3_stroruni_slice_startstop = ll_rstr.LLHelpers.ll_stringslice_startstop - _ll_1_stroruni_slice_minusone = ll_rstr.LLHelpers.ll_stringslice_minusone - # ---------- malloc with del ---------- def _ll_2_raw_malloc(TP, size): Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Wed Sep 22 15:23:31 2010 @@ -4,9 +4,9 @@ from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.jit.codewriter.jtransform import Transformer from pypy.jit.metainterp.history import getkind -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rlist from pypy.translator.unsimplify import varoftype -from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter import heaptracker, effectinfo def const(x): return Constant(x, lltype.typeOf(x)) @@ -21,6 +21,8 @@ return ('calldescr', FUNC, ARGS, RESULT) def fielddescrof(self, STRUCT, name): return ('fielddescr', STRUCT, name) + def arraydescrof(self, ARRAY): + return FakeDescr(('arraydescr', ARRAY)) def sizeof(self, STRUCT): return FakeDescr(('sizedescr', STRUCT)) @@ -74,8 +76,8 @@ class FakeBuiltinCallControl: def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op): - return 'calldescr' + def getcalldescr(self, op, oopspecindex): + return 'calldescr-%d' % oopspecindex def calldescr_canraise(self, calldescr): return False @@ -681,7 +683,6 @@ assert op1.result == v2 def test_str_concat(): - py.test.xfail('later') # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.STR) FUNC = lltype.FuncType([PSTR, PSTR], PSTR) @@ -693,8 +694,28 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'residual_call_r_r' - assert list(op1.args[2]) == [v1, v2] + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT + assert op1.args[1].value == func + assert op1.args[2:] == [v1, v2] + assert op1.result == v3 + +def test_unicode_concat(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.UNICODE) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT + assert op1.args[1].value == func + assert op1.args[2:] == 
[v1, v2] assert op1.result == v3 def test_str_stringslice_startonly(): @@ -710,9 +731,11 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'residual_call_ir_r' - assert list(op1.args[2]) == [v2] - assert list(op1.args[3]) == [v1] + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % ( + effectinfo.EffectInfo.OS_STR_SLICE_STARTONLY) + assert op1.args[1].value == func + assert op1.args[2:] == [v1, v2] assert op1.result == v3 def test_str_stringslice_startstop(): @@ -720,8 +743,8 @@ PSTR = lltype.Ptr(rstr.STR) INT = lltype.Signed FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) - func = lltype.functionptr(FUNC, 'll_stringslice_startstop', - _callable=rstr.LLHelpers.ll_stringslice_startstop) + func = lltype.functionptr(FUNC, '_ll_stringslice_startstop', + _callable=rstr.LLHelpers._ll_stringslice_startstop) v1 = varoftype(PSTR) v2 = varoftype(INT) v3 = varoftype(INT) @@ -729,9 +752,11 @@ op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'residual_call_ir_r' - assert list(op1.args[2]) == [v2, v3] - assert list(op1.args[3]) == [v1] + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % ( + effectinfo.EffectInfo.OS_STR_SLICE_STARTSTOP) + assert op1.args[1].value == func + assert op1.args[2:] == [v1, v2, v3] assert op1.result == v4 def test_str_stringslice_minusone(): @@ -745,6 +770,31 @@ op = SpaceOperation('direct_call', [const(func), v1], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'residual_call_r_r' - assert list(op1.args[2]) == [v1] + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % ( + effectinfo.EffectInfo.OS_STR_SLICE_MINUSONE) + assert op1.args[1].value == func + assert op1.args[2:] == [v1] assert op1.result == v2 + +def test_list_ll_arraycopy(): + from pypy.rlib.rgc import ll_arraycopy + LIST = lltype.GcArray(lltype.Signed) + PLIST = lltype.Ptr(LIST) + INT = lltype.Signed + FUNC = lltype.FuncType([PLIST]*2+[INT]*3, lltype.Void) + func = lltype.functionptr(FUNC, 'll_arraycopy', _callable=ll_arraycopy) + v1 = varoftype(PLIST) + v2 = varoftype(PLIST) + v3 = varoftype(INT) + v4 = varoftype(INT) + v5 = varoftype(INT) + v6 = varoftype(lltype.Void) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'oopspec_call' + assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY + assert op1.args[1].value == func + assert op1.args[2:] == [v1, v2, v3, v4, v5] + assert op1.result == v6 Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py Wed Sep 22 15:23:31 2010 @@ -36,10 +36,14 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op): + def __init__(self, op, oopspecindex=0): self.op = op + self.oopspecindex = oopspecindex def __repr__(self): - return '' + if self.oopspecindex == 0: + return '' + else: + return '' % self.oopspecindex def builtin_test(oopspec_name, args, RESTYPE, expected): v_result = 
varoftype(RESTYPE) @@ -99,7 +103,7 @@ varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ - arraycopy , $'myfunc', %r0, %r1, %i0, %i1, %i2, + oopspec_call , $'myfunc', %r0, %r1, %i0, %i1, %i2 """) def test_fixed_getitem(): From arigo at codespeak.net Wed Sep 22 16:02:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 16:02:58 +0200 (CEST) Subject: [pypy-svn] r77271 - in pypy/branch/jit-str/pypy/jit: codewriter codewriter/test metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100922140258.CAAB5282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 16:02:56 2010 New Revision: 77271 Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py pypy/branch/jit-str/pypy/jit/metainterp/blackhole.py pypy/branch/jit-str/pypy/jit/metainterp/executor.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/pyjitpl.py pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Log: Still in-progress. Rewriting stuff. Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Wed Sep 22 16:02:56 2010 @@ -1028,7 +1028,7 @@ def _handle_oopspec_call(self, op, args, oopspecindex): cc = self.callcontrol calldescr = cc.getcalldescr(op, oopspecindex=oopspecindex) - return SpaceOperation('oopspec_call', + return SpaceOperation('call_oopspec', [calldescr, op.args[0]] + args, op.result) Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Wed Sep 22 16:02:56 2010 @@ -694,7 +694,7 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT assert op1.args[1].value == func assert op1.args[2:] == [v1, v2] @@ -712,7 +712,7 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT assert op1.args[1].value == func assert op1.args[2:] == [v1, v2] @@ -731,7 +731,7 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % ( 
effectinfo.EffectInfo.OS_STR_SLICE_STARTONLY) assert op1.args[1].value == func @@ -752,7 +752,7 @@ op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % ( effectinfo.EffectInfo.OS_STR_SLICE_STARTSTOP) assert op1.args[1].value == func @@ -770,7 +770,7 @@ op = SpaceOperation('direct_call', [const(func), v1], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % ( effectinfo.EffectInfo.OS_STR_SLICE_MINUSONE) assert op1.args[1].value == func @@ -793,7 +793,7 @@ op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'oopspec_call' + assert op1.opname == 'call_oopspec' assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[1].value == func assert op1.args[2:] == [v1, v2, v3, v4, v5] Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py Wed Sep 22 16:02:56 2010 @@ -103,7 +103,7 @@ varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ - oopspec_call , $'myfunc', %r0, %r1, %i0, %i1, %i2 + call_oopspec , $'myfunc', %r0, %r1, %i0, %i1, %i2 """) def test_fixed_getitem(): Modified: pypy/branch/jit-str/pypy/jit/metainterp/blackhole.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/blackhole.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/blackhole.py Wed Sep 22 16:02:56 2010 @@ -1024,10 +1024,6 @@ def bhimpl_arraylen_gc(cpu, array, arraydescr): return cpu.bh_arraylen_gc(arraydescr, array) - @arguments("cpu", "d", "i", "r", "r", "i", "i", "i", "d") - def bhimpl_arraycopy(cpu, calldescr, func, x1, x2, x3, x4, x5, arraydescr): - cpu.bh_call_v(func, calldescr, [x3, x4, x5], [x1, x2], None) - @arguments("cpu", "r", "d", "d", "i", returns="i") def bhimpl_getarrayitem_vable_i(cpu, vable, fielddescr, arraydescr, index): array = cpu.bh_getfield_gc_r(vable, fielddescr) Modified: pypy/branch/jit-str/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/executor.py Wed Sep 22 16:02:56 2010 @@ -165,12 +165,6 @@ def do_new_with_vtable(cpu, _, clsbox): return BoxPtr(exec_new_with_vtable(cpu, clsbox)) -def do_arraycopy(cpu, _, calldescr, funcbox, x1box, x2box, - x3box, x4box, x5box, arraydescr): - cpu.bh_call_v(funcbox.getint(), calldescr, - [x3box.getint(), x4box.getint(), x5box.getint()], - [x1box.getref_base(), x2box.getref_base()], None) - def do_int_add_ovf(cpu, metainterp, box1, box2): # the overflow operations can be called without a metainterp, if an # overflow cannot occur Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ 
pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 22 16:02:56 2010 @@ -126,12 +126,6 @@ def setitem(self, index, value): raise NotImplementedError - def getchar(self): - raise NotImplementedError - - def setchar(self, charvalue): - raise NotImplementedError - class ConstantValue(OptValue): def __init__(self, box): self.make_constant(box) Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/rewrite.py Wed Sep 22 16:02:56 2010 @@ -138,6 +138,7 @@ # replace CALL_PURE with just CALL self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, op.descr)) + def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.args[0]) if value.is_constant(): Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 16:02:56 2010 @@ -7,6 +7,8 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * +from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.rlib.unroll import unrolling_iterable class AbstractVirtualValue(OptValue): @@ -193,37 +195,42 @@ def _make_virtual(self, modifier): return modifier.make_varray(self.arraydescr) -class VStringLength1Value(AbstractVirtualValue): +class VStringPlainValue(AbstractVirtualValue): - def __init__(self, optimizer, keybox, source_op=None): + def __init__(self, optimizer, size, keybox, source_op=None): AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) - self._char = CVAL_ZERO + self._chars = [CVAL_ZERO] * size + + def getlength(self): + return len(self._chars) - def getchar(self): - return self._char + def getitem(self, index): + return self._chars[index] - def setchar(self, charvalue): + def setitem(self, index, charvalue): assert isinstance(charvalue, OptValue) - self._char = charvalue + self._chars[index] = charvalue def _really_force(self): assert self.source_op is not None newoperations = self.optimizer.newoperations newoperations.append(self.source_op) self.box = box = self.source_op.result - charbox = self._char.force_box() - op = ResOperation(rop.STRSETITEM, - [box, ConstInt(0), charbox], None) + for i in range(len(self._chars)): + charbox = self._chars[i].force_box() + op = ResOperation(rop.STRSETITEM, + [box, ConstInt(i), charbox], None) newoperations.append(op) def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [self._char.get_key_box()] + charboxes = [box.get_key_box() for box in self._chars] modifier.register_virtual_fields(self.keybox, charboxes) - self._char.get_args_for_fail(modifier) + for box in self._chars: + box.get_args_for_fail(modifier) def _make_virtual(self, modifier): - return modifier.make_vstring() + return modifier.make_vstrconcat() class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): @@ -313,8 +320,8 @@ self.make_equal_to(box, vvalue) return vvalue - def make_vstring_length1(self, box, source_op=None): - vvalue = VStringLength1Value(self.optimizer, box, 
source_op) + def make_vstring_plain(self, length, box, source_op=None): + vvalue = VStringPlainValue(self.optimizer, length, box, source_op) self.make_equal_to(box, vvalue) return vvalue @@ -457,12 +464,25 @@ ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) - def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) + def optimize_CALL(self, op): + # dispatch based on 'oopspecindex' to a method that handles + # specifically the given oopspec call. For non-oopspec calls, + # oopspecindex is just zero. + effectinfo = op.descr.get_extra_info() + if effectinfo is not None: + oopspecindex = effectinfo.oopspecindex + for value, meth in opt_call_oopspec_ops: + if oopspecindex == value: + if meth(self, op): + return + self.emit_operation(op) + + def opt_call_oopspec_ARRAYCOPY(self, op): + source_value = self.getvalue(op.args[1]) + dest_value = self.getvalue(op.args[2]) + source_start_box = self.get_constant_box(op.args[3]) + dest_start_box = self.get_constant_box(op.args[4]) + length = self.get_constant_box(op.args[5]) if (source_value.is_virtual() and source_start_box and dest_start_box and length and dest_value.is_virtual()): # XXX optimize the case where dest value is not virtual, @@ -472,48 +492,47 @@ for index in range(length.getint()): val = source_value.getitem(index + source_start) dest_value.setitem(index + dest_start, val) - return + return True if length and length.getint() == 0: - return # 0-length arraycopy - descr = op.args[0] - assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - descr)) + return True # 0-length arraycopy + return False def optimize_NEWSTR(self, op): length_box = self.get_constant_box(op.args[0]) - if length_box and length_box.getint() == 1: # NEWSTR(1) + if length_box: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument if not isinstance(op.args[0], ConstInt): - op = ResOperation(rop.NEWSTR, [CONST_1], op.result) - self.make_vstring_length1(op.result, op) + op = ResOperation(rop.NEWSTR, [length_box], op.result) + self.make_vstring_plain(length_box.getint(), op.result, op) else: self.emit_operation(op) def optimize_STRSETITEM(self, op): value = self.getvalue(op.args[0]) - if value.is_virtual(): - charvalue = self.getvalue(op.args[2]) - value.setchar(charvalue) - else: - value.ensure_nonnull() - self.emit_operation(op) + if value.is_virtual() and isinstance(value, VStringPlainValue): + indexbox = self.get_constant_box(op.args[1]) + if indexbox is not None: + value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + return + value.ensure_nonnull() + self.emit_operation(op) def optimize_STRGETITEM(self, op): value = self.getvalue(op.args[0]) - if value.is_virtual(): - charvalue = value.getchar() - assert charvalue is not None - self.make_equal_to(op.result, charvalue) - else: - value.ensure_nonnull() - self.emit_operation(op) + if value.is_virtual() and isinstance(value, VStringPlainValue): + indexbox = self.get_constant_box(op.args[1]) + if indexbox is not None: + charvalue = value.getitem(indexbox.getint()) + self.make_equal_to(op.result, charvalue) + return + value.ensure_nonnull() + self.emit_operation(op) def optimize_STRLEN(self, op): value = self.getvalue(op.args[0]) if value.is_virtual(): - 
self.make_constant_int(op.result, 1) + self.make_constant_int(op.result, value.getlength()) else: value.ensure_nonnull() self.emit_operation(op) @@ -528,3 +547,14 @@ self.emit_operation(op) optimize_ops = _findall(OptVirtualize, 'optimize_') + +def _findall_call_oopspec(): + prefix = 'opt_call_oopspec_' + result = [] + for name in dir(OptVirtualize): + if name.startswith(prefix): + value = getattr(EffectInfo, 'OS_' + name[len(prefix):]) + assert isinstance(value, int) and value != 0 + result.append((value, getattr(OptVirtualize, name))) + return unrolling_iterable(result) +opt_call_oopspec_ops = _findall_call_oopspec() Modified: pypy/branch/jit-str/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/pyjitpl.py Wed Sep 22 16:02:56 2010 @@ -421,14 +421,6 @@ def opimpl_arraylen_gc(self, arraybox, arraydescr): return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox) - @arguments("descr", "box", "box", "box", "box", "box", "box", "descr") - def opimpl_arraycopy(self, calldescr, fnptr, sourcebox, destbox, - source_startbox, dest_startbox, lengthbox, - arraydescr): - self.execute_with_descr(rop.ARRAYCOPY, arraydescr, calldescr, fnptr, - sourcebox, destbox, source_startbox, - dest_startbox, lengthbox) - @arguments("orgpc", "box", "descr", "box") def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox): negbox = self.metainterp.execute_and_record( Modified: pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py Wed Sep 22 16:02:56 2010 @@ -213,7 +213,6 @@ 'SETARRAYITEM_RAW/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', - 'ARRAYCOPY/7d', # removed before it's passed to the backend 'NEWSTR/1', 'STRSETITEM/3', 'UNICODESETITEM/3', Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Wed Sep 22 16:02:56 2010 @@ -253,8 +253,8 @@ def make_varray(self, arraydescr): return VArrayInfo(arraydescr) - def make_vstring(self): - return VStringInfo() + def make_vstrconcat(self): + return VStrConcatInfo() def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) @@ -489,7 +489,11 @@ for i in self.fieldnums: debug_print("\t\t", str(untag(i))) -class VStringInfo(AbstractVirtualInfo): +class VStrConcatInfo(AbstractVirtualInfo): + """Stands for the string made out of the concatenation of all + fieldnums. Each fieldnum can be an integer (the ord() of a single + character) or a pointer (another string). XXX only integers implemented + """ def __init__(self): pass #self.fieldnums = ... 
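
As an illustration (not part of this commit): the idea behind VStringPlainValue above is that a string built by a NEWSTR with a constant length stays virtual; reads and writes are answered from a shadow list of character values, and only when the string escapes does _really_force() replay the NEWSTR followed by one STRSETITEM per character. The toy class below models that behaviour in ordinary Python under those assumptions; ToyVirtualString and the op tuples are made-up stand-ins, not names from the branch.

    class ToyVirtualString(object):
        """Toy model (not from the branch) of a 'plain' virtual string."""
        def __init__(self, size):
            self.chars = ['\x00'] * size    # zero-filled, like CVAL_ZERO above

        def setitem(self, index, char):
            # while the string is virtual, writes only touch the shadow list
            self.chars[index] = char

        def getitem(self, index):
            # reads are answered without emitting any operation
            return self.chars[index]

        def force(self):
            # the string escapes: replay NEWSTR plus one STRSETITEM per char,
            # mirroring VStringPlainValue._really_force()
            ops = [('NEWSTR', len(self.chars))]
            for i, ch in enumerate(self.chars):
                ops.append(('STRSETITEM', i, ch))
            return ops

    if __name__ == '__main__':
        s = ToyVirtualString(3)
        s.setitem(0, 'a')
        s.setitem(1, 'b')
        s.setitem(2, 'c')
        assert s.getitem(1) == 'b'
        assert s.force() == [('NEWSTR', 3), ('STRSETITEM', 0, 'a'),
                             ('STRSETITEM', 1, 'b'), ('STRSETITEM', 2, 'c')]
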
Modified: pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py Wed Sep 22 16:02:56 2010 @@ -9,14 +9,12 @@ def transform(op): from pypy.jit.metainterp.history import AbstractDescr - # change ARRAYCOPY to call, so we don't have to pass around - # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. - if op.opnum == rop.ARRAYCOPY: - descr = op.args[0] - assert isinstance(descr, AbstractDescr) - op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr) - elif op.opnum == rop.CALL_PURE: + # Rename CALL_PURE and CALL_OOPSPEC to CALL. + # Simplify the VIRTUAL_REF_* so that they don't show up in the backend. + if op.opnum == rop.CALL_PURE: op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) + elif op.opnum == rop.CALL_OOPSPEC: + op = ResOperation(rop.CALL, op.args[:], op.result, op.descr) elif op.opnum == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) elif op.opnum == rop.VIRTUAL_REF_FINISH: Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Wed Sep 22 16:02:56 2010 @@ -115,6 +115,8 @@ mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([nextdescr], [], [], EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE)) + arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY)) class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 22 16:02:56 2010 @@ -3080,7 +3080,7 @@ setarrayitem_gc(p1, 1, 1, descr=arraydescr) p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p2, 1, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 1, descr=arraydescr) jump(i2) ''' @@ -3097,7 +3097,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 0, i0, descr=arraydescr) setarrayitem_gc(p2, 0, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 0, descr=arraydescr) jump(i2) ''' @@ -3114,7 +3114,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 2, 10, descr=arraydescr) setarrayitem_gc(p2, 2, 13, descr=arraydescr) - arraycopy(0, 0, p1, p2, 0, 0, 3, descr=arraydescr) + call(0, p1, p2, 0, 0, 3, descr=arraycopydescr) jump(p2) ''' expected = ''' @@ -3131,7 +3131,7 @@ ops = ''' [p1] p0 = new_array(0, descr=arraydescr) - arraycopy(0, 0, p0, p1, 0, 0, 0, descr=arraydescr) + call(0, p0, p1, 0, 0, 0, descr=arraycopydescr) jump(p1) ''' expected = ''' Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- 
pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Wed Sep 22 16:02:56 2010 @@ -155,19 +155,23 @@ def test_strconcat_pure(self): for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) @dont_look_inside def escape(x): pass def f(n, m): - s = dochr(n) + dochr(m) - if not we_are_jitted(): - escape(s) + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = dochr(n) + dochr(m) + if m > 100: + escape(s) + m -= 1 return 42 - self.interp_operations(f, [65, 66]) - py.test.xfail() - self.check_operations_history(newstr=0, strsetitem=0, - newunicode=0, unicodesetitem=0, - call=0, call_pure=0) + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0, + call=0, call_pure=0) class TestOOtype(StringTests, OOJitMixin): From fijal at codespeak.net Wed Sep 22 16:16:41 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 16:16:41 +0200 (CEST) Subject: [pypy-svn] r77272 - pypy/benchmarks/own Message-ID: <20100922141641.452FC282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 16:16:39 2010 New Revision: 77272 Modified: pypy/benchmarks/own/pyaes.py Log: I claim this is a legitimate speedup. Indeed when using array slices, one can't expect that they won't allocate a new array. Modified: pypy/benchmarks/own/pyaes.py ============================================================================== --- pypy/benchmarks/own/pyaes.py (original) +++ pypy/benchmarks/own/pyaes.py Wed Sep 22 16:16:39 2010 @@ -222,7 +222,9 @@ for i in xrange(4): col = i * 4 - v0, v1, v2, v3 = block[col : col+4] + #v0, v1, v2, v3 = block[col : col+4] + v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], + block[col + 3]) block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1] block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2] @@ -245,7 +247,9 @@ for i in xrange(4): col = i * 4 - v0, v1, v2, v3 = block[col : col+4] + v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], + block[col + 3]) + #v0, v1, v2, v3 = block[col:col+4] block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1] block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2] From arigo at codespeak.net Wed Sep 22 16:27:19 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 16:27:19 +0200 (CEST) Subject: [pypy-svn] r77273 - in pypy/branch/jit-str/pypy/jit: codewriter codewriter/test metainterp Message-ID: <20100922142719.DA445282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 16:27:18 2010 New Revision: 77273 Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py Log: Use a regular 'residual_call_xyz' operation to call oopspec functions. There is no reason to have a special one. 
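
As an aside to the log message above (this sketch is not code from the branch): the rewrite classifies the call's arguments by kind and encodes both the argument kinds and the result kind in the operation name, so an oopspec call taking refs and ints and returning a ref becomes residual_call_ir_r instead of the special call_oopspec. The rule below is inferred from the jtransform.py hunk and from the expected names in the updated tests that follow; residual_call_name is a made-up helper.

    def residual_call_name(argkinds, reskind):
        # split the arguments by kind, as make_three_lists() does; in the
        # real transformer these lists become the ListOfKind call arguments
        lst_i = [k for k in argkinds if k == 'i']
        lst_r = [k for k in argkinds if k == 'r']
        lst_f = [k for k in argkinds if k == 'f']
        if lst_f or reskind == 'f':
            kinds = 'irf'
        elif lst_i:
            kinds = 'ir'
        else:
            kinds = 'r'
        return 'residual_call_%s_%s' % (kinds, reskind)

    # the names the updated tests below expect:
    assert residual_call_name(['r', 'r'], 'r') == 'residual_call_r_r'      # str concat
    assert residual_call_name(['r', 'i'], 'r') == 'residual_call_ir_r'     # slice, start only
    assert residual_call_name(['r', 'r', 'i', 'i', 'i'], 'v') == 'residual_call_ir_v'  # ll_arraycopy
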
Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Wed Sep 22 16:27:18 2010 @@ -249,11 +249,13 @@ kind = self.callcontrol.guess_call_kind(op) return getattr(self, 'handle_%s_indirect_call' % kind)(op) - def rewrite_call(self, op, namebase, initialargs): + def rewrite_call(self, op, namebase, initialargs, args=None): """Turn 'i0 = direct_call(fn, i1, i2, ref1, ref2)' into 'i0 = xxx_call_ir_i(fn, descr, [i1,i2], [ref1,ref2])'. The name is one of '{residual,direct}_call_{r,ir,irf}_{i,r,f,v}'.""" - lst_i, lst_r, lst_f = self.make_three_lists(op.args[1:]) + if args is None: + args = op.args[1:] + lst_i, lst_r, lst_f = self.make_three_lists(args) reskind = getkind(op.result.concretetype)[0] if lst_f or reskind == 'f': kinds = 'irf' elif lst_i: kinds = 'ir' @@ -1026,11 +1028,13 @@ # Strings and Unicodes. def _handle_oopspec_call(self, op, args, oopspecindex): - cc = self.callcontrol - calldescr = cc.getcalldescr(op, oopspecindex=oopspecindex) - return SpaceOperation('call_oopspec', - [calldescr, op.args[0]] + args, - op.result) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + op1 = self.rewrite_call(op, 'residual_call', + [op.args[0], calldescr], + args=args) + if self.callcontrol.calldescr_canraise(calldescr): + op1 = [op1, SpaceOperation('-live-', [], None)] + return op1 def _handle_stroruni_call(self, op, oopspec_name, args): dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Wed Sep 22 16:27:18 2010 @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rlist from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo +from pypy.jit.codewriter.flatten import ListOfKind def const(x): return Constant(x, lltype.typeOf(x)) @@ -694,10 +695,10 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT - assert op1.args[1].value == func - assert op1.args[2:] == [v1, v2] + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) assert op1.result == v3 def test_unicode_concat(): @@ -712,10 +713,10 @@ op = SpaceOperation('direct_call', [const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT - assert op1.args[1].value == func - assert op1.args[2:] == [v1, v2] + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) assert op1.result == v3 def test_str_stringslice_startonly(): @@ -731,11 +732,12 @@ op = SpaceOperation('direct_call', 
[const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % ( + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % ( effectinfo.EffectInfo.OS_STR_SLICE_STARTONLY) - assert op1.args[1].value == func - assert op1.args[2:] == [v1, v2] + assert op1.args[2] == ListOfKind('int', [v2]) + assert op1.args[3] == ListOfKind('ref', [v1]) assert op1.result == v3 def test_str_stringslice_startstop(): @@ -752,11 +754,12 @@ op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % ( + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % ( effectinfo.EffectInfo.OS_STR_SLICE_STARTSTOP) - assert op1.args[1].value == func - assert op1.args[2:] == [v1, v2, v3] + assert op1.args[2] == ListOfKind('int', [v2, v3]) + assert op1.args[3] == ListOfKind('ref', [v1]) assert op1.result == v4 def test_str_stringslice_minusone(): @@ -770,11 +773,11 @@ op = SpaceOperation('direct_call', [const(func), v1], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % ( + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % ( effectinfo.EffectInfo.OS_STR_SLICE_MINUSONE) - assert op1.args[1].value == func - assert op1.args[2:] == [v1] + assert op1.args[2] == ListOfKind('ref', [v1]) assert op1.result == v2 def test_list_ll_arraycopy(): @@ -793,8 +796,8 @@ op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) - assert op1.opname == 'call_oopspec' - assert op1.args[0] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY - assert op1.args[1].value == func - assert op1.args[2:] == [v1, v2, v3, v4, v5] - assert op1.result == v6 + assert op1.opname == 'residual_call_ir_v' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY + assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) + assert op1.args[3] == ListOfKind('ref', [v1, v2]) Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_list.py Wed Sep 22 16:27:18 2010 @@ -44,6 +44,8 @@ return '' else: return '' % self.oopspecindex + def calldescr_canraise(self, calldescr): + return False def builtin_test(oopspec_name, args, RESTYPE, expected): v_result = varoftype(RESTYPE) @@ -103,7 +105,7 @@ varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ - call_oopspec , $'myfunc', %r0, %r1, %i0, %i1, %i2 + residual_call_ir_v $'myfunc', , I[%i0, %i1, %i2], R[%r0, %r1] """) def test_fixed_getitem(): Modified: pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/simple_optimize.py Wed Sep 22 16:27:18 2010 @@ 
-9,12 +9,10 @@ def transform(op): from pypy.jit.metainterp.history import AbstractDescr - # Rename CALL_PURE and CALL_OOPSPEC to CALL. + # Rename CALL_PURE to CALL. # Simplify the VIRTUAL_REF_* so that they don't show up in the backend. if op.opnum == rop.CALL_PURE: op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) - elif op.opnum == rop.CALL_OOPSPEC: - op = ResOperation(rop.CALL, op.args[:], op.result, op.descr) elif op.opnum == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) elif op.opnum == rop.VIRTUAL_REF_FINISH: From fijal at codespeak.net Wed Sep 22 16:40:22 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 16:40:22 +0200 (CEST) Subject: [pypy-svn] r77274 - in pypy/branch/jitffi/pypy/rlib: . test Message-ID: <20100922144022.E0B6D282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 16:40:21 2010 New Revision: 77274 Added: pypy/branch/jitffi/pypy/rlib/clibffi.py - copied, changed from r77159, pypy/branch/jitffi/pypy/rlib/libffi.py Removed: pypy/branch/jitffi/pypy/rlib/libffi.py Modified: pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Log: Goal is to split current rlib/libffi into libffi.so specific-part (clibffi) and interface part. Purpose is to jit stuff. First part - dummy copy and * import Copied: pypy/branch/jitffi/pypy/rlib/clibffi.py (from r77159, pypy/branch/jitffi/pypy/rlib/libffi.py) ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Wed Sep 22 16:40:21 2010 @@ -467,7 +467,6 @@ lltype.free(ll_args, flavor='raw') check_fficall_result(ffires, self.flags) - class FuncPtr(AbstractFuncPtr): ll_args = lltype.nullptr(rffi.VOIDPP.TO) ll_result = lltype.nullptr(rffi.VOIDP.TO) @@ -510,7 +509,10 @@ push_arg_as_ffiptr(self.argtypes[self.pushed_args], value, self.ll_args[self.pushed_args]) self.pushed_args += 1 - push_arg._annspecialcase_ = 'specialize:argtype(1)' + # XXX this is bad, fix it somehow in the future, but specialize:argtype + # doesn't work correctly with mixing non-negative and normal integers + push_arg._annenforceargs_ = [None, int] + #push_arg._annspecialcase_ = 'specialize:argtype(1)' push_arg.oopspec = 'libffi_push_arg(self, value)' def _check_args(self): @@ -549,7 +551,7 @@ self.ll_result = lltype.nullptr(rffi.VOIDP.TO) AbstractFuncPtr.__del__(self) -class CDLL: +class CDLL(object): def __init__(self, libname, unload_on_finalization=True): """Load the library, or raises DLOpenError.""" self.unload_on_finalization = unload_on_finalization Modified: pypy/branch/jitffi/pypy/rlib/test/test_libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/test/test_libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Wed Sep 22 16:40:21 2010 @@ -27,7 +27,7 @@ return 'libm.so' -class TestLibffi: +class TestLibffi(object): def setup_method(self, meth): ALLOCATED.clear() From fijal at codespeak.net Wed Sep 22 16:57:45 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 16:57:45 +0200 (CEST) Subject: [pypy-svn] r77275 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100922145745.A2406282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 16:57:44 2010 New Revision: 77275 Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py Log: Remove a parameter that noone seems to use Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py 
============================================================================== --- pypy/branch/jitffi/pypy/rlib/clibffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Wed Sep 22 16:57:44 2010 @@ -552,16 +552,15 @@ AbstractFuncPtr.__del__(self) class CDLL(object): - def __init__(self, libname, unload_on_finalization=True): + def __init__(self, libname): """Load the library, or raises DLOpenError.""" - self.unload_on_finalization = unload_on_finalization self.lib = lltype.nullptr(rffi.CCHARP.TO) ll_libname = rffi.str2charp(libname) self.lib = dlopen(ll_libname) lltype.free(ll_libname, flavor='raw') def __del__(self): - if self.lib and self.unload_on_finalization: + if self.lib: dlclose(self.lib) self.lib = lltype.nullptr(rffi.CCHARP.TO) From antocuni at codespeak.net Wed Sep 22 17:34:49 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 22 Sep 2010 17:34:49 +0200 (CEST) Subject: [pypy-svn] r77277 - in pypy/branch/jitffi/pypy/rlib: . test Message-ID: <20100922153449.6F535282BFB@codespeak.net> Author: antocuni Date: Wed Sep 22 17:34:47 2010 New Revision: 77277 Added: pypy/branch/jitffi/pypy/rlib/jitffi.py (contents, props changed) pypy/branch/jitffi/pypy/rlib/test/test_jitffi.py (contents, props changed) Log: first try for a jit-friendly interface to libffi Added: pypy/branch/jitffi/pypy/rlib/jitffi.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/rlib/jitffi.py Wed Sep 22 17:34:47 2010 @@ -0,0 +1,37 @@ +class AbstractArg(object): + next = None + +class IntArg(AbstractArg): + + def __init__(self, intval): + self.intval = intval + + def push(self, funcptr): + funcptr.push_arg(self.intval) + +class FloatArg(AbstractArg): + + def __init__(self, floatval): + self.floatval = floatval + + def push(self, funcptr): + funcptr.push_arg(self.floatval) + + +class Func(object): + + def __init__(self, funcptr): + # XXX: for now, this is just a wrapper around libffi.FuncPtr, but in + # the future it will replace it completely + self.funcptr = funcptr + + def call(self, argchain, RESULT): + # implementation detail + arg = argchain + while arg: + arg.push(self.funcptr) + arg = arg.next + return self.funcptr.call(RESULT) + call._annspecialcase_ = 'specialize:arg(1)' + + Added: pypy/branch/jitffi/pypy/rlib/test/test_jitffi.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/rlib/test/test_jitffi.py Wed Sep 22 17:34:47 2010 @@ -0,0 +1,23 @@ +import sys +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rlib.clibffi import CDLL, ffi_type_double +from pypy.rlib.jitffi import Func, FloatArg +from pypy.rlib.test.test_libffi import get_libc_name, get_libm_name + +class TestJitffi(object): + + def get_libc(self): + return CDLL(get_libc_name()) + + def get_libm(self): + return CDLL(get_libm_name(sys.platform)) + + def test_call_argchain(self): + libm = self.get_libm() + pow_ptr = libm.getpointer('pow', [ffi_type_double, ffi_type_double], + ffi_type_double) + pow = Func(pow_ptr) + argchain = FloatArg(2.0) + argchain.next = FloatArg(3.0) + res = pow.call(argchain, rffi.DOUBLE) + assert res == 8.0 From arigo at codespeak.net Wed Sep 22 17:58:54 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 17:58:54 +0200 (CEST) Subject: [pypy-svn] r77278 - in pypy/branch/jit-str/pypy: jit/codewriter jit/metainterp/optimizeopt jit/metainterp/test rpython/lltypesystem Message-ID: 
<20100922155854.63BCE282BFB@codespeak.net> Author: arigo Date: Wed Sep 22 17:58:52 2010 New Revision: 77278 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py pypy/branch/jit-str/pypy/rpython/lltypesystem/rlist.py pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Log: Correct a few typos. Implement str_concat, but not forcing of them (that's the next test). Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Wed Sep 22 17:58:52 2010 @@ -89,7 +89,8 @@ return EffectInfo(readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, - extraeffect) + extraeffect, + oopspecindex) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 17:58:52 2010 @@ -195,7 +195,9 @@ def _make_virtual(self, modifier): return modifier.make_varray(self.arraydescr) + class VStringPlainValue(AbstractVirtualValue): + """A string built with newstr(const).""" def __init__(self, optimizer, size, keybox, source_op=None): AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) @@ -224,14 +226,38 @@ def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [box.get_key_box() for box in self._chars] + charboxes = [value.get_key_box() for value in self._chars] modifier.register_virtual_fields(self.keybox, charboxes) - for box in self._chars: - box.get_args_for_fail(modifier) + for value in self._chars: + value.get_args_for_fail(modifier) def _make_virtual(self, modifier): return modifier.make_vstrconcat() + +class VStringConcatValue(AbstractVirtualValue): + """The concatenation of two other strings.""" + + def __init__(self, optimizer, keybox): + AbstractVirtualValue.__init__(self, optimizer, keybox) + self._left = None + self._right = None + + def _really_force(self): + xxx + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + leftbox = self._left.get_key_box() + rightbox = self._right.get_key_box() + modifier.register_virtual_fields(self.keybox, [leftbox, rightbox]) + self._left.get_args_for_fail(modifier) + self._right.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_vstrconcat() + + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -325,6 +351,11 @@ self.make_equal_to(box, vvalue) return vvalue + def make_vstring_concat(self, box): + vvalue = VStringConcatValue(self.optimizer, box) + self.make_equal_to(box, vvalue) + return vvalue + def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] @@ -520,7 +551,7 @@ def optimize_STRGETITEM(self, op): value = self.getvalue(op.args[0]) - if value.is_virtual() and isinstance(value, VStringPlainValue): + if isinstance(value, VStringPlainValue): # even if no longer virtual indexbox = 
self.get_constant_box(op.args[1]) if indexbox is not None: charvalue = value.getitem(indexbox.getint()) @@ -531,12 +562,18 @@ def optimize_STRLEN(self, op): value = self.getvalue(op.args[0]) - if value.is_virtual(): + if isinstance(value, VStringPlainValue): # even if no longer virtual self.make_constant_int(op.result, value.getlength()) else: value.ensure_nonnull() self.emit_operation(op) + def opt_call_oopspec_STR_CONCAT(self, op): + value = self.make_vstring_concat(op.result) + value._left = self.getvalue(op.args[1]) + value._right = self.getvalue(op.args[2]) + return True + def propagate_forward(self, op): opnum = op.opnum for value, func in optimize_ops: Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Wed Sep 22 17:58:52 2010 @@ -150,6 +150,7 @@ return total res = self.meta_interp(f, [6]) assert res == sum(map(ord, 'sgn9OE!')) + py.test.xfail() self.check_loops(call=0, call_pure=0, newstr=0, strgetitem=1, strsetitem=0, strlen=0) Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rlist.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rlist.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rlist.py Wed Sep 22 17:58:52 2010 @@ -159,7 +159,6 @@ if 'item_repr' not in self.__dict__: self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer()) if isinstance(self.LIST, GcForwardReference): - ITEM = self.item_repr.lowleveltype ITEMARRAY = self.get_itemarray_lowleveltype() self.LIST.become(ITEMARRAY) Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Wed Sep 22 17:58:52 2010 @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated from pypy.rlib.objectmodel import _hash_string, enforceargs from pypy.rlib.debug import ll_assert -from pypy.rlib.jit import purefunction +from pypy.rlib.jit import purefunction, we_are_jitted from pypy.rpython.robject import PyObjRepr, pyobj_repr from pypy.rpython.rmodel import inputconst, IntegerRepr from pypy.rpython.rstr import AbstractStringRepr,AbstractCharRepr,\ @@ -318,7 +318,6 @@ def ll_strfasthash(s): return s.hash # assumes that the hash is already computed - @purefunction def ll_strconcat(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -326,7 +325,7 @@ s1.copy_contents(s1, newstr, 0, 0, len1) s1.copy_contents(s2, newstr, 0, len1, len2) return newstr - #ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' + ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' @purefunction def ll_strip(s, ch, left, right): @@ -704,19 +703,26 @@ return newstr ll_stringslice_startonly.oopspec = 'stroruni.slice_startonly(s1, start)' - def ll_stringslice_startstop(s1, start, stop): - if stop >= len(s1.chars): - if start == 0: - return s1 - stop = len(s1.chars) + def _ll_stringslice_startstop(s1, start, stop): newstr = s1.malloc(stop - start) assert start >= 0 lgt = stop - start assert lgt >= 0 s1.copy_contents(s1, newstr, start, 0, lgt) return newstr - ll_stringslice_startstop.oopspec = ('stroruni.slice_startstop(s1, ' - 'start, stop)') + 
_ll_stringslice_startstop.oopspec = ('stroruni.slice_startstop(s1, ' + 'start, stop)') + + def ll_stringslice_startstop(s1, start, stop): + if we_are_jitted(): + if stop > len(s1.chars): + stop = len(s1.chars) + else: + if stop >= len(s1.chars): + if start == 0: + return s1 + stop = len(s1.chars) + return LLHelpers._ll_stringslice_startstop(s1, start, stop) def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 From arigo at codespeak.net Wed Sep 22 18:29:09 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Sep 2010 18:29:09 +0200 (CEST) Subject: [pypy-svn] r77279 - pypy/extradoc/talk/pepm2011 Message-ID: <20100922162909.10E20282C03@codespeak.net> Author: arigo Date: Wed Sep 22 18:29:08 2010 New Revision: 77279 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: Two typos. Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Wed Sep 22 18:29:08 2010 @@ -107,9 +107,9 @@ heap-structure is allocated for them, that contains the actual value. Type dispatching is the process of finding the concrete implementation that is -applicable to the objects at hand when doing a generic operation at hand. An +applicable to the objects at hand when doing a generic operation on them. An example would be the addition of two objects: The addition needs to check what -the concrete objects are that should be added are, and choose the implementation +the concrete objects that should be added are, and choose the implementation that is fitting for them. Last year, we wrote a paper \cite{XXX} about how PyPy's meta-JIT From afa at codespeak.net Wed Sep 22 20:19:13 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 22 Sep 2010 20:19:13 +0200 (CEST) Subject: [pypy-svn] r77280 - in pypy/branch/fast-forward/pypy: annotation rpython translator/c Message-ID: <20100922181913.C73AA282BFB@codespeak.net> Author: afa Date: Wed Sep 22 20:19:12 2010 New Revision: 77280 Modified: pypy/branch/fast-forward/pypy/annotation/binaryop.py pypy/branch/fast-forward/pypy/annotation/model.py pypy/branch/fast-forward/pypy/rpython/rfloat.py pypy/branch/fast-forward/pypy/translator/c/primitive.py Log: Fix translation of the "long double" datatype Modified: pypy/branch/fast-forward/pypy/annotation/binaryop.py ============================================================================== --- pypy/branch/fast-forward/pypy/annotation/binaryop.py (original) +++ pypy/branch/fast-forward/pypy/annotation/binaryop.py Wed Sep 22 20:19:12 2010 @@ -13,7 +13,7 @@ from pypy.annotation.model import SomePBC, SomeFloat, s_None from pypy.annotation.model import SomeExternalObject, SomeWeakRef from pypy.annotation.model import SomeAddress, SomeTypedAddressAccess -from pypy.annotation.model import SomeSingleFloat +from pypy.annotation.model import SomeSingleFloat, SomeLongFloat from pypy.annotation.model import unionof, UnionError, missing_operation from pypy.annotation.model import isdegenerated, TLS from pypy.annotation.model import read_can_only_throw @@ -490,6 +490,12 @@ return SomeSingleFloat() +class __extend__(pairtype(SomeLongFloat, SomeLongFloat)): + + def union((flt1, flt2)): + return SomeLongFloat() + + class __extend__(pairtype(SomeList, SomeList)): def union((lst1, lst2)): Modified: pypy/branch/fast-forward/pypy/annotation/model.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/annotation/model.py (original) +++ pypy/branch/fast-forward/pypy/annotation/model.py Wed Sep 22 20:19:12 2010 @@ -34,7 +34,7 @@ from pypy.tool.pairtype import pair, extendabletype from pypy.tool.tls import tlsobject from pypy.rlib.rarithmetic import r_uint, r_ulonglong, base_int -from pypy.rlib.rarithmetic import r_singlefloat, isnan +from pypy.rlib.rarithmetic import r_singlefloat, r_longfloat, isnan import inspect, weakref DEBUG = False # set to False to disable recording of debugging information @@ -182,6 +182,15 @@ def can_be_none(self): return False +class SomeLongFloat(SomeObject): + "Stands for an r_longfloat." + # No operation supported, not even union with a regular float + knowntype = r_longfloat + immutable = True + + def can_be_none(self): + return False + class SomeInteger(SomeFloat): "Stands for an object which is known to be an integer." knowntype = int @@ -580,6 +589,7 @@ (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeSingleFloat(), lltype.SingleFloat), + (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), (SomeUnicodeCodePoint(), lltype.UniChar), (SomeAddress(), llmemory.Address), Modified: pypy/branch/fast-forward/pypy/rpython/rfloat.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/rfloat.py (original) +++ pypy/branch/fast-forward/pypy/rpython/rfloat.py Wed Sep 22 20:19:12 2010 @@ -209,8 +209,8 @@ _callable=lambda x: pyobjectptr(x)) return NotImplemented -# ____________________________________________________________ -# Support for r_singlefloat from pypy.rlib.rarithmetic +# ______________________________________________________________________ +# Support for r_singlefloat and r_longfloat from pypy.rlib.rarithmetic from pypy.rpython.lltypesystem import lltype from pypy.rpython.rmodel import Repr @@ -230,3 +230,19 @@ # we use cast_primitive to go between Float and SingleFloat. return hop.genop('cast_primitive', [v], resulttype = lltype.Float) + +class __extend__(annmodel.SomeLongFloat): + def rtyper_makerepr(self, rtyper): + return LongFloatRepr() + def rtyper_makekey(self): + return self.__class__, + +class LongFloatRepr(Repr): + lowleveltype = lltype.LongFloat + + def rtype_float(self, hop): + v, = hop.inputargs(lltype.LongFloat) + hop.exception_cannot_occur() + # we use cast_primitive to go between Float and LongFloat. 
+ return hop.genop('cast_primitive', [v], + resulttype = lltype.Float) Modified: pypy/branch/fast-forward/pypy/translator/c/primitive.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/primitive.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/primitive.py Wed Sep 22 20:19:12 2010 @@ -100,6 +100,7 @@ x = repr(value) assert not x.startswith('n') return x +name_longfloat = name_float def name_singlefloat(value, db): value = float(value) @@ -167,6 +168,7 @@ Unsigned: name_unsigned, Float: name_float, SingleFloat: name_singlefloat, + LongFloat: name_longfloat, Char: name_char, UniChar: name_unichar, Bool: name_bool, @@ -182,6 +184,7 @@ Unsigned: 'unsigned long @', Float: 'double @', SingleFloat: 'float @', + LongFloat: 'long double @', Char: 'char @', UniChar: 'wchar_t @', Bool: 'bool_t @', From fijal at codespeak.net Wed Sep 22 21:39:16 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 21:39:16 +0200 (CEST) Subject: [pypy-svn] r77281 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100922193916.DB090282B9D@codespeak.net> Author: fijal Date: Wed Sep 22 21:39:14 2010 New Revision: 77281 Modified: pypy/branch/jitffi/pypy/rlib/jitffi.py Log: Some docstrings Modified: pypy/branch/jitffi/pypy/rlib/jitffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/jitffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/jitffi.py Wed Sep 22 21:39:14 2010 @@ -1,7 +1,10 @@ + class AbstractArg(object): next = None class IntArg(AbstractArg): + """ An argument holding an integer + """ def __init__(self, intval): self.intval = intval @@ -10,6 +13,8 @@ funcptr.push_arg(self.intval) class FloatArg(AbstractArg): + """ An argument holding a float + """ def __init__(self, floatval): self.floatval = floatval @@ -17,7 +22,6 @@ def push(self, funcptr): funcptr.push_arg(self.floatval) - class Func(object): def __init__(self, funcptr): From fijal at codespeak.net Wed Sep 22 21:41:59 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 21:41:59 +0200 (CEST) Subject: [pypy-svn] r77282 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100922194159.CBD02282B9D@codespeak.net> Author: fijal Date: Wed Sep 22 21:41:58 2010 New Revision: 77282 Added: pypy/branch/jitffi/pypy/rlib/libffi.py - copied, changed from r77281, pypy/branch/jitffi/pypy/rlib/jitffi.py Removed: pypy/branch/jitffi/pypy/rlib/jitffi.py Log: Rename jitffi to libffi for now and do * import of everything from clibffi Copied: pypy/branch/jitffi/pypy/rlib/libffi.py (from r77281, pypy/branch/jitffi/pypy/rlib/jitffi.py) ============================================================================== --- pypy/branch/jitffi/pypy/rlib/jitffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Wed Sep 22 21:41:58 2010 @@ -1,4 +1,6 @@ +from pypy.rlib.clibffi import * + class AbstractArg(object): next = None From fijal at codespeak.net Wed Sep 22 21:45:14 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 21:45:14 +0200 (CEST) Subject: [pypy-svn] r77283 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100922194514.CBB62282B9D@codespeak.net> Author: fijal Date: Wed Sep 22 21:45:13 2010 New Revision: 77283 Modified: pypy/branch/jitffi/pypy/rlib/libffi.py Log: fix specialize and use a decorator Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- 
pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Wed Sep 22 21:45:13 2010 @@ -1,5 +1,6 @@ from pypy.rlib.clibffi import * +from pypy.rlib.objectmodel import specialize class AbstractArg(object): next = None @@ -31,6 +32,7 @@ # the future it will replace it completely self.funcptr = funcptr + @specialize.arg(2) def call(self, argchain, RESULT): # implementation detail arg = argchain @@ -38,6 +40,5 @@ arg.push(self.funcptr) arg = arg.next return self.funcptr.call(RESULT) - call._annspecialcase_ = 'specialize:arg(1)' From fijal at codespeak.net Wed Sep 22 21:46:32 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 21:46:32 +0200 (CEST) Subject: [pypy-svn] r77284 - pypy/branch/jitffi/pypy/jit/metainterp/test Message-ID: <20100922194632.B4A18282B9D@codespeak.net> Author: fijal Date: Wed Sep 22 21:46:31 2010 New Revision: 77284 Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Log: adapt test (still no assert) Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Wed Sep 22 21:46:31 2010 @@ -2,7 +2,8 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test.test_basic import LLJitMixin -from pypy.rlib.libffi import FuncPtr, CDLL, ffi_type_sint +from pypy.rlib.clibffi import FuncPtr, CDLL, ffi_type_sint +from pypy.rlib.libffi import IntArg, Func from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform @@ -24,17 +25,19 @@ standalone=False)) def test_one(self): - driver = JitDriver(reds = ['n', 'fn'], greens = []) + driver = JitDriver(reds = ['n', 'func'], greens = []) def f(n): cdll = CDLL(self.lib_name) fn = cdll.getpointer('sum_xy', [ffi_type_sint, ffi_type_sint], ffi_type_sint) + func = Func(fn) while n < 10: - driver.jit_merge_point(n=n, fn=fn) - driver.can_enter_jit(n=n, fn=fn) - fn.push_arg(n) - fn.push_arg(1) - n = fn.call(lltype.Signed) + driver.jit_merge_point(n=n, func=func) + driver.can_enter_jit(n=n, func=func) + arg0 = IntArg(n) + arg1 = IntArg(1) + arg0.next = arg1 + n = func.call(arg0, lltype.Signed) self.meta_interp(f, [0]) From fijal at codespeak.net Wed Sep 22 22:24:52 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 22 Sep 2010 22:24:52 +0200 (CEST) Subject: [pypy-svn] r77285 - pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt Message-ID: <20100922202452.BA399282BFB@codespeak.net> Author: fijal Date: Wed Sep 22 22:24:51 2010 New Revision: 77285 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py Log: try a bit harder to have a __repr__ for VirtualValue Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 22:24:51 2010 @@ -134,6 +134,11 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def __repr__(self): + cls_name = self.known_class.value.adr.ptr._obj._TYPE._name + field_names = [field.name for field in self._fields] + return "" % (cls_name, field_names) + class 
VStructValue(AbstractVirtualStructValue): def __init__(self, optimizer, structdescr, keybox, source_op=None): From afa at codespeak.net Thu Sep 23 02:10:36 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 23 Sep 2010 02:10:36 +0200 (CEST) Subject: [pypy-svn] r77286 - in pypy/branch/fast-forward/pypy: module/_socket module/_socket/test rlib Message-ID: <20100923001036.88780282B9D@codespeak.net> Author: afa Date: Thu Sep 23 02:10:34 2010 New Revision: 77286 Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py Log: Add socket.socket.ioctl() method on Windows. Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py Thu Sep 23 02:10:34 2010 @@ -4,9 +4,10 @@ from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped from pypy.interpreter.gateway import interp2app from pypy.rlib.rarithmetic import intmask +from pypy.rlib import rsocket from pypy.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM from pypy.rlib.rsocket import SocketError, SocketErrorWithErrno -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter import gateway class W_RSocket(Wrappable, RSocket): @@ -337,7 +338,51 @@ except SocketError, e: raise converted_error(space, e) recvfrom_into_w.unwrap_spec = ['self', ObjSpace, W_Root, int, int] - + + def ioctl_w(self, space, cmd, w_option): + from pypy.rpython.lltypesystem import rffi, lltype + from pypy.rlib import rwin32 + from pypy.rlib.rsocket import _c + + recv_ptr = lltype.malloc(rwin32.LPDWORD.TO, 1, flavor='raw') + try: + if cmd == _c.SIO_RCVALL: + option_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + try: + option_ptr[0] = space.uint_w(w_option) + option_ptr = rffi.cast(rffi.VOIDP, option_ptr) + res = _c.WSAIoctl( + self.fd, cmd, + option_ptr, rffi.sizeof(rffi.INTP), + rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) + if res < 0: + raise error() + finally: + lltype.free(option_ptr, flavor='raw') + elif cmd == _c.SIO_KEEPALIVE_VALS: + w_onoff, w_time, w_interval = space.unpackiterable(w_option) + option_ptr = lltype.malloc(_c.tcp_keepalive, flavor='raw') + try: + option_ptr.c_onoff = space.uint_w(w_onoff) + option_ptr.c_keepalivetime = space.uint_w(w_time) + option_ptr.c_keepaliveinterval = space.uint_w(w_interval) + option_ptr = rffi.cast(rffi.VOIDP, option_ptr) + res = _c.WSAIoctl( + self.fd, cmd, + option_ptr, rffi.sizeof(_c.tcp_keepalive), + rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) + if res < 0: + raise error() + finally: + lltype.free(option_ptr, flavor='raw') + else: + raise operationerrfmt(space.w_ValueError, + "invalid ioctl command %d", cmd) + return space.wrap(recv_ptr[0]) + finally: + lltype.free(recv_ptr, flavor='raw') + ioctl_w.unwrap_spec = ['self', ObjSpace, int, W_Root] + def shutdown_w(self, space, how): """shutdown(flag) @@ -429,6 +474,8 @@ for name in ('dup',): if not hasattr(RSocket, name): socketmethodnames.remove(name) +if hasattr(rsocket._c, 'WSAIoctl'): + socketmethodnames.append('ioctl') socketmethods = {} for methodname in socketmethodnames: Modified: pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py 
============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/test/test_sock_app.py Thu Sep 23 02:10:34 2010 @@ -471,6 +471,19 @@ (reuse,) = struct.unpack('i', reusestr) assert reuse != 0 + def test_socket_ioctl(self): + import _socket, sys + if sys.platform != 'win32': + skip("win32 only") + assert hasattr(_socket.socket, 'ioctl') + assert hasattr(_socket, 'SIO_RCVALL') + assert hasattr(_socket, 'RCVALL_ON') + assert hasattr(_socket, 'RCVALL_OFF') + assert hasattr(_socket, 'SIO_KEEPALIVE_VALS') + s = _socket.socket() + raises(ValueError, s.ioctl, -1, None) + s.ioctl(_socket.SIO_KEEPALIVE_VALS, (1, 100, 100)) + def test_dup(self): import _socket as socket if not hasattr(socket.socket, 'dup'): Modified: pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/fast-forward/pypy/rlib/_rsocket_rffi.py Thu Sep 23 02:10:34 2010 @@ -55,6 +55,7 @@ header_lines = [ '#include ', '#include ', + '#include ', # winsock2 defines AF_UNIX, but not sockaddr_un '#undef AF_UNIX', ] @@ -195,13 +196,20 @@ FD_CONNECT_BIT FD_CLOSE_BIT WSA_IO_PENDING WSA_IO_INCOMPLETE WSA_INVALID_HANDLE WSA_INVALID_PARAMETER WSA_NOT_ENOUGH_MEMORY WSA_OPERATION_ABORTED +SIO_RCVALL SIO_KEEPALIVE_VALS SIOCGIFNAME '''.split() for name in constant_names: setattr(CConfig, name, platform.DefinedConstantInteger(name)) - + +if _WIN32: + # some SDKs define these values with an enum, #ifdef won't work + for name in ('RCVALL_ON', 'RCVALL_OFF'): + setattr(CConfig, name, platform.ConstantInteger(name)) + constant_names.append(name) + constants["BDADDR_ANY"] = "00:00:00:00:00:00" constants["BDADDR_LOCAL"] = "00:00:00:FF:FF:FF" @@ -353,6 +361,12 @@ ('iMaxUdpDg', rffi.USHORT), ('lpVendorInfo', CCHARP)]) + CConfig.tcp_keepalive = platform.Struct( + 'struct tcp_keepalive', + [('onoff', rffi.ULONG), + ('keepalivetime', rffi.ULONG), + ('keepaliveinterval', rffi.ULONG)]) + class cConfig: pass @@ -556,6 +570,7 @@ rffi.INT) elif WIN32: + from pypy.rlib import rwin32 # # The following is for pypy.rlib.rpoll # @@ -579,6 +594,14 @@ lltype.Ptr(WSANETWORKEVENTS)], rffi.INT) + WSAIoctl = external('WSAIoctl', + [socketfd_type, rwin32.DWORD, + rffi.VOIDP, rwin32.DWORD, + rffi.VOIDP, rwin32.DWORD, + rwin32.LPDWORD, rffi.VOIDP, rffi.VOIDP], + rffi.INT) + tcp_keepalive = cConfig.tcp_keepalive + if WIN32: WSAData = cConfig.WSAData WSAStartup = external('WSAStartup', [rffi.INT, lltype.Ptr(WSAData)], From fijal at codespeak.net Thu Sep 23 09:43:26 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 23 Sep 2010 09:43:26 +0200 (CEST) Subject: [pypy-svn] r77287 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100923074326.2DED1282BFB@codespeak.net> Author: fijal Date: Thu Sep 23 09:43:24 2010 New Revision: 77287 Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py pypy/branch/jitffi/pypy/rlib/libffi.py Log: A bunch of jit-related changes. 
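
As context for the diff that follows (illustration only, not code from the branch): libffi.Func.call() walks a linked chain of argument objects and pushes each one before invoking the C function; marking that walk @jit.unroll_safe and listing funcsym in _immutable_fields_ lets the JIT unroll the loop and treat the function symbol as a constant. A toy model of the calling pattern, with FakeFuncPtr standing in for the real clibffi FuncPtr:

    class Arg(object):
        next = None
        def __init__(self, value):
            self.value = value
        def push(self, funcptr):
            funcptr.push_arg(self.value)

    class FakeFuncPtr(object):
        # stand-in for clibffi's FuncPtr; it just records pushed arguments
        def __init__(self):
            self.pushed = []
        def push_arg(self, value):
            self.pushed.append(value)
        def call(self):
            return sum(self.pushed)   # pretend the C function adds its arguments

    def call_with_chain(funcptr, argchain):
        arg = argchain
        while arg:                    # the loop that @jit.unroll_safe unrolls
            arg.push(funcptr)
            arg = arg.next
        return funcptr.call()

    chain = Arg(2)
    chain.next = Arg(3)
    assert call_with_chain(FakeFuncPtr(), chain) == 5
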
Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/clibffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Thu Sep 23 09:43:24 2010 @@ -471,6 +471,8 @@ ll_args = lltype.nullptr(rffi.VOIDPP.TO) ll_result = lltype.nullptr(rffi.VOIDP.TO) + _immutable_fields_ = ['funcsym'] # XXX probably more + def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, keepalive=None): # initialize each one of pointers with null @@ -491,9 +493,9 @@ flavor='raw') def push_arg(self, value): - if self.pushed_args == self.argnum: - raise TypeError("Too many arguments, eats %d, pushed %d" % - (self.argnum, self.argnum + 1)) + #if self.pushed_args == self.argnum: + # raise TypeError("Too many arguments, eats %d, pushed %d" % + # (self.argnum, self.argnum + 1)) if not we_are_translated(): TP = lltype.typeOf(value) if isinstance(TP, lltype.Ptr): @@ -522,7 +524,7 @@ def _clean_args(self): self.pushed_args = 0 - def call(self, RES_TP): + def call(self, funcsym, RES_TP): self._check_args() ffires = c_ffi_call(self.ll_cif, self.funcsym, rffi.cast(rffi.VOIDP, self.ll_result), @@ -535,8 +537,8 @@ self._clean_args() check_fficall_result(ffires, self.flags) return res - call._annspecialcase_ = 'specialize:arg(1)' - call.oopspec = 'libffi_call(self, RES_TP)' + call._annspecialcase_ = 'specialize:arg(2)' + call.oopspec = 'libffi_call(self, funcsym, RES_TP)' def __del__(self): if self.ll_args: Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Thu Sep 23 09:43:24 2010 @@ -1,6 +1,7 @@ from pypy.rlib.clibffi import * from pypy.rlib.objectmodel import specialize +from pypy.rlib import jit class AbstractArg(object): next = None @@ -32,6 +33,7 @@ # the future it will replace it completely self.funcptr = funcptr + @jit.unroll_safe @specialize.arg(2) def call(self, argchain, RESULT): # implementation detail @@ -39,6 +41,4 @@ while arg: arg.push(self.funcptr) arg = arg.next - return self.funcptr.call(RESULT) - - + return self.funcptr.call(self.funcptr.funcsym, RESULT) From fijal at codespeak.net Thu Sep 23 10:40:34 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 23 Sep 2010 10:40:34 +0200 (CEST) Subject: [pypy-svn] r77288 - pypy/trunk/pypy/jit/tool Message-ID: <20100923084034.C93BD282BFB@codespeak.net> Author: fijal Date: Thu Sep 23 10:40:33 2010 New Revision: 77288 Modified: pypy/trunk/pypy/jit/tool/traceviewer.py Log: Improve a bit reading of long numbers Modified: pypy/trunk/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/traceviewer.py (original) +++ pypy/trunk/pypy/jit/tool/traceviewer.py Thu Sep 23 10:40:33 2010 @@ -253,9 +253,10 @@ def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [re.split(r' +', line, 1) for line in countname.readlines()] - counts = Counts([(k.strip("\n"), int(v.strip('\n'))) - for v, k in counts]) + counts = [re.split('( 20 and use_threshold: counts.threshold = l[-20] From fijal at codespeak.net Thu Sep 23 11:37:47 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 23 Sep 2010 11:37:47 +0200 (CEST) Subject: [pypy-svn] r77289 - in pypy/branch/jitffi/pypy/jit: codewriter metainterp metainterp/optimizeopt 
metainterp/test Message-ID: <20100923093747.754AB282BFB@codespeak.net> Author: fijal Date: Thu Sep 23 11:37:45 2010 New Revision: 77289 Added: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py pypy/branch/jitffi/pypy/jit/metainterp/executor.py pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Log: A checkin of what I have. Completely unfinished Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jitffi/pypy/jit/codewriter/support.py Thu Sep 23 11:37:45 2010 @@ -227,10 +227,11 @@ func = cast_base_ptr_to_instance(FuncPtr, llfunc) return func.push_arg(value) -def _ll_2_libffi_call(llfunc, RES_TP): +def _ll_3_libffi_call(llfunc, symfunc, RES_TP): from pypy.rlib.libffi import FuncPtr func = cast_base_ptr_to_instance(FuncPtr, llfunc) - return func.call(lltype.Float) # XXX: should be RES_TP, but it doesn't work + return func.call(symfunc, lltype.Signed) +# XXX: should be RES_TP, but it doesn't work # in the following calls to builtins, the JIT is allowed to look inside: Modified: pypy/branch/jitffi/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/executor.py Thu Sep 23 11:37:45 2010 @@ -80,6 +80,9 @@ do_call_loopinvariant = do_call do_call_may_force = do_call +def do_call_c(cpu, metainterp, argboxes, descr): + raise NotImplementedError("Should never be called directly") + def do_getarrayitem_gc(cpu, _, arraybox, indexbox, arraydescr): array = arraybox.getref_base() index = indexbox.getint() Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 23 11:37:45 2010 @@ -3,6 +3,7 @@ from intbounds import OptIntBounds from virtualize import OptVirtualize from heap import OptHeap +from ccall import OptCCall def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -14,6 +15,7 @@ OptRewrite(), OptVirtualize(), OptHeap(), + OptCCall(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) optimizer.propagate_all_forward() Added: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Thu Sep 23 11:37:45 2010 @@ -0,0 +1,30 @@ + +from pypy.jit.metainterp.optimizeutil import _findall +from pypy.rlib.objectmodel import we_are_translated +from optimizer import * + +class OptCCall(Optimization): + def _dissect_virtual_value(self, args_so_far, value): + pass + + def optimize_CALL(self, op): + self.emit_operation(op) + return + args = [] + v = self.getvalue(op.args[2]) + while v: + v = self._dissect_virtual_value(args, v) + import pdb + pdb.set_trace() + return self.optimize_default(op) + + def propagate_forward(self, op): + opnum = op.opnum + for value, func in optimize_ops: + if opnum 
== value: + func(self, op) + break + else: + self.emit_operation(op) + +optimize_ops = _findall(OptCCall, 'optimize_') Modified: pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py Thu Sep 23 11:37:45 2010 @@ -222,10 +222,11 @@ 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend + 'CALL_C', # call directly C code from here (a function addres comes first) '_CANRAISE_FIRST', # ----- start of can_raise operations ----- 'CALL', - 'CALL_ASSEMBLER', + 'CALL_ASSEMBLER', # call already compiled assembler 'CALL_MAY_FORCE', 'CALL_LOOPINVARIANT', #'OOSEND', # ootype operation Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 23 11:37:45 2010 @@ -3818,7 +3818,22 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - + def test_arg_pushing(self): + ops = """ + [p1, i0] + p2 = new_with_vtable(ConstClass(IntArgVTable)) + p3 = new_with_vtable(ConstClass(IntArgVTable)) + setfield_gc(p3, Const(1), descr=int_arg_intval) + setfield_gc(p2, p3, descr=int_arg_next) + setfield_gc(p2, i0, descr=int_arg_intval) + i4 = call(13, p1, p2, descr=nonwritedescr) + guard_no_exception() [] + """ + expected = """ + [p1, i0] + call_c( + """ + self.optimize_loop(ops, 'Not, Not', expected) ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): From cfbolz at codespeak.net Thu Sep 23 11:42:02 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 23 Sep 2010 11:42:02 +0200 (CEST) Subject: [pypy-svn] r77290 - in pypy/extradoc/talk/pepm2011: . figures Message-ID: <20100923094202.67FCB282BFB@codespeak.net> Author: cfbolz Date: Thu Sep 23 11:42:00 2010 New Revision: 77290 Added: pypy/extradoc/talk/pepm2011/figures/ - copied from r77289, user/cfbolz/blog/fig_virtual2/ Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: import blog post Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Thu Sep 23 11:42:00 2010 @@ -89,6 +89,8 @@ \keywords{XXX}% +XXX drop the word "allocation removal" somewhere + \section{Introduction} The goal of a just-in-time compiler for a dynamic language is obviously to @@ -124,12 +126,20 @@ \section{Background} \label{sec:Background} -\subsection{PyPy} -\label{sub:PyPy} - \subsection{Tracing JIT Compilers} \label{sub:JIT_background} +XXX object model and its reflection in traces (e.g. guard\_class before each method call) + +traces and bridges + +arguments to traces + +getting from the interpreter to traces + +\subsection{PyPy} +\label{sub:PyPy} + \section{Escape Analysis in a Tracing JIT} \label{sec:Escape Analysis in a Tracing JIT} @@ -143,16 +153,19 @@ double-dispatching. These classes could be part of the implementation of a very simple interpreter written in RPython. 
+\begin{figure} \begin{verbatim} class Base(object): def add(self, other): """ add self to other """ raise NotImplementedError("abstract base") def add__int(self, intother): - """ add intother to self, where intother is a Python integer """ + """ add intother to self, + where intother is an integer """ raise NotImplementedError("abstract base") def add__float(self, floatother): - """ add floatother to self, where floatother is a Python float """ + """ add floatother to self, + where floatother is a float """ raise NotImplementedError("abstract base") def is_positive(self): """ returns whether self is positive """ @@ -166,7 +179,8 @@ def add__int(self, intother): return BoxedInteger(intother + self.intval) def add__float(self, floatother): - return BoxedFloat(floatother + float(self.intval)) + floatvalue = floatother + float(self.intval) + return BoxedFloat(floatvalue) def is_positive(self): return self.intval > 0 @@ -176,12 +190,15 @@ def add(self, other): return other.add__float(self.floatval) def add__int(self, intother): - return BoxedFloat(float(intother) + self.floatval) + floatvalue = float(intother) + self.floatval + return BoxedFloat(floatvalue) def add__float(self, floatother): return BoxedFloat(floatother + self.floatval) def is_positive(self): return self.floatval > 0.0 \end{verbatim} +\caption{A simple object model} +\end{figure} Using these classes to implement arithmetic shows the basic problem that a dynamic language implementation has. All the numbers are instances of either @@ -206,64 +223,67 @@ The loop iterates \texttt{y} times, and computes something in the process. To understand the reason why executing this function is slow, here is the trace that is produced by the tracing JIT when executing the function with \texttt{y} -being a \texttt{BoxedInteger}: +being a \texttt{BoxedInteger}: XXX make it clear that this is really a trace specific for BoxedInteger +\begin{figure} \begin{verbatim} -# arguments to the trace: p0, p1 -# inside f: res.add(y) -guard_class(p1, BoxedInteger) - # inside BoxedInteger.add - i2 = getfield_gc(p1, intval) - guard_class(p0, BoxedInteger) - # inside BoxedInteger.add__int - i3 = getfield_gc(p0, intval) - i4 = int_add(i2, i3) - p5 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p5, i4, intval) -# inside f: BoxedInteger(-100) -p6 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p6, -100, intval) + # arguments to the trace: p0, p1 + # inside f: res.add(y) + guard_class(p1, BoxedInteger) + # inside BoxedInteger.add + i2 = getfield_gc(p1, intval) + guard_class(p0, BoxedInteger) + # inside BoxedInteger.add__int + i3 = getfield_gc(p0, intval) + i4 = int_add(i2, i3) + p5 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p5, i4, intval) + # inside f: BoxedInteger(-100) + p6 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p6, -100, intval) + + # inside f: .add(BoxedInteger(-100)) + guard_class(p5, BoxedInteger) + # inside BoxedInteger.add + i7 = getfield_gc(p5, intval) + guard_class(p6, BoxedInteger) + # inside BoxedInteger.add__int + i8 = getfield_gc(p6, intval) + i9 = int_add(i7, i8) + p10 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p10, i9, intval) + + # inside f: BoxedInteger(-1) + p11 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p11, -1, intval) -# inside f: .add(BoxedInteger(-100)) -guard_class(p5, BoxedInteger) - # inside BoxedInteger.add - i7 = getfield_gc(p5, intval) - guard_class(p6, BoxedInteger) - # inside 
BoxedInteger.add__int - i8 = getfield_gc(p6, intval) - i9 = int_add(i7, i8) - p10 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p10, i9, intval) - -# inside f: BoxedInteger(-1) -p11 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p11, -1, intval) - -# inside f: y.add(BoxedInteger(-1)) -guard_class(p0, BoxedInteger) - # inside BoxedInteger.add - i12 = getfield_gc(p0, intval) - guard_class(p11, BoxedInteger) - # inside BoxedInteger.add__int - i13 = getfield_gc(p11, intval) - i14 = int_add(i12, i13) - p15 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p15, i14, intval) - -# inside f: y.is_positive() -guard_class(p15, BoxedInteger) - # inside BoxedInteger.is_positive - i16 = getfield_gc(p15, intval) - i17 = int_gt(i16, 0) -# inside f -guard_true(i17) -jump(p15, p10) + # inside f: y.add(BoxedInteger(-1)) + guard_class(p0, BoxedInteger) + # inside BoxedInteger.add + i12 = getfield_gc(p0, intval) + guard_class(p11, BoxedInteger) + # inside BoxedInteger.add__int + i13 = getfield_gc(p11, intval) + i14 = int_add(i12, i13) + p15 = new(BoxedInteger) + # inside BoxedInteger.__init__ + setfield_gc(p15, i14, intval) + + # inside f: y.is_positive() + guard_class(p15, BoxedInteger) + # inside BoxedInteger.is_positive + i16 = getfield_gc(p15, intval) + i17 = int_gt(i16, 0) + # inside f + guard_true(i17) + jump(p15, p10) \end{verbatim} +\caption{unoptimized trace for the simple object model} +\end{figure} (indentation corresponds to the stack level of the traced functions). @@ -403,6 +423,158 @@ % section Escape Analysis in a Tracing JIT (end) +\section{Escape Analysis Across Loop Boundaries} +\label{sec:crossloop} + +This section is a bit +science-fictiony. The algorithm that PyPy currently uses is significantly more +complex and much harder than the one that is described here. The resulting +behaviour is very similar, however, so we will use the simpler version (and we +might switch to that at some point in the actual implementation). + +In the last section we described how escape analysis can be used to remove +many of the allocations of short-lived objects and many of the type dispatches +that are present in a non-optimized trace. In this section we will improve the +optimization to also handle more cases. + +To understand some more what the optimization described in the last section +can achieve, look at the following figure: + +\includegraphics{figures/obj-lifetime.pdf} + +The figure shows a trace before optimization, together with the lifetime of +various kinds of objects created in the trace. It is executed from top to +bottom. At the bottom, a jump is used to execute the same loop another time. +For clarity, the figure shows two iterations of the loop. +The loop is executed until one of the guards in the trace fails, and the +execution is aborted. + +Some of the operations within this trace are \texttt{new} operations, which each create a +new instance of some class. These instances are used for a while, e.g. by +calling methods on them, reading and writing their fields. Some of these +instances escape, which means that they are stored in some globally accessible +place or are passed into a function. + +Together with the \texttt{new} operations, the figure shows the lifetimes of the +created objects. Objects in category 1 live for a while, and are then just not +used any more. The creation of these objects is removed by the +optimization described in the last section. + +Objects in category 2 live for a while and then escape. 
The optimization of the +last section deals with them too: the \texttt{new} that creates them and +the field accesses are deferred, until the point where the object escapes. + +The objects in category 3 and 4 are in principle like the objects in category 1 +and 2. They are created, live for a while, but are then passed as an argument +to the \texttt{jump} operation. In the next iteration they can either die (category +3) or escape (category 4). + +The optimization of the last section considered the passing of an object along a +jump to be equivalent to escaping. It was thus treating objects in category 3 +and 4 like those in category 2. + +The improved optimization described in this section will make it possible to deal +better with objects in category 3 and 4. This will have two consequences: on +the one hand, more allocations are removed from the trace (which is clearly +good). As a side-effect of this, the traces will also be type-specialized. + + +%___________________________________________________________________________ + +\subsection{Optimizing Across the Jump} + +Let's look at the final trace obtained in the last section for the example loop. +The final trace was much better than the original one, because many allocations +were removed from it. However, it also still contained allocations: + +\begin{figure} +\includegraphics{figures/step1.pdf} +\end{figure} + +The two new \texttt{BoxedIntegers} stored in \texttt{p15} and \texttt{p10} are passed into +the next iteration of the loop. The next iteration will check that they are +indeed \texttt{BoxedIntegers}, read their \texttt{intval} fields and then not use them +any more. Thus those instances are in category 3. + +In its current state the loop +allocates two \texttt{BoxedIntegers} at the end of every iteration, which then die +very quickly in the next iteration. In addition, the type checks at the start +of the loop are superfluous, at least after the first iteration. + +The reason why we cannot optimize the remaining allocations away is because +their lifetime crosses the jump. To improve the situation, a little trick is +needed. The trace above represents a loop, i.e. the jump at the end jumps to +the beginning. Where in the loop the jump occurs is arbitrary, since the loop +can only be left via failing guards anyway. Therefore it does not change the +semantics of the loop to put the jump at another point in the trace and we +can move the \texttt{jump} operation just above the allocation of the objects that +appear in the current \texttt{jump}. This needs some care, because the arguments to +\texttt{jump} are all currently live variables, thus they need to be adapted. + +If we do that for our example trace above, the trace looks like this: +\begin{figure} +\includegraphics{figures/step2.pdf} +\end{figure} + +Now the lifetime of the remaining allocations no longer crosses the jump, and +we can run our escape analysis a second time, to get the following trace: +\begin{figure} +\includegraphics{figures/step3.pdf} +\end{figure} + +This result is now really good. The code performs the same operations as +the original code, but using direct CPU arithmetic and no boxing, as opposed to +the original version which used dynamic dispatching and boxing. + +Looking at the final trace it is also completely clear that specialization has +happened. The trace corresponds to the situation in which the trace was +originally recorded, which happened to be a loop where \texttt{BoxedIntegers} were +used. 
The resulting loop now does not refer to the \texttt{BoxedInteger} class at +all any more, but it still has the same behaviour. If the original loop had +used \texttt{BoxedFloats}, the final loop would use \texttt{float\_*} operations +everywhere instead (or even be very different, if the object model had +user-defined classes). + + +%___________________________________________________________________________ + +\subsection{Entering the Loop} + +The approach of placing the \texttt{jump} at some other point in the loop leads to +one additional complication that we glossed over so far. The beginning of the +original loop corresponds to a point in the original program, namely the +\texttt{while} loop in the function \texttt{f} from the last section. + +Now recall that in a VM that uses a tracing JIT, all programs start by being +interpreted. This means that when \texttt{f} is executed by the interpreter, it is +easy to go from the interpreter to the first version of the compiled loop. +After the \texttt{jump} is moved and the escape analysis optimization is applied a +second time, this is no longer easily possible. In particular, the new loop +expects two integers as input arguments, while the old one expected two +instances. + +To make it possible to enter the loop directly from the interpreter, there +needs to be some additional code that enters the loop by taking as input +arguments what is available to the interpreter, i.e. two instances. This +additional code corresponds to one iteration of the loop, which is thus +peeled off \cite{XXX}: + +\begin{figure} +\includegraphics{figures/step4.pdf} +\end{figure} + + +%___________________________________________________________________________ + +\subsection{Summary} + +The optimization described in this section can be used to optimize away +allocations in category 3 and improve allocations in category 4, by deferring +them until they are no longer avoidable. A side-effect of these optimizations +is also that the optimized loops are specialized for the types of the variables +that are used inside them. + +% section Escape Analysis Across Loop Boundaries (end) \section{Evaluation} \label{sec:Evaluation} From cfbolz at codespeak.net Thu Sep 23 11:48:48 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 23 Sep 2010 11:48:48 +0200 (CEST) Subject: [pypy-svn] r77291 - pypy/extradoc/talk/pepm2011/figures Message-ID: <20100923094848.E76F9282BFB@codespeak.net> Author: cfbolz Date: Thu Sep 23 11:48:47 2010 New Revision: 77291 Added: pypy/extradoc/talk/pepm2011/figures/obj-lifetime.pdf (contents, props changed) Log: add pdf Added: pypy/extradoc/talk/pepm2011/figures/obj-lifetime.pdf ============================================================================== Binary file. No diff available. 
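The "Entering the Loop" passage in the paper diff above (r77290) can be made concrete with a small sketch. The following is purely illustrative and not part of any commit or of the paper: the loop body is reconstructed from the comments in the unoptimized trace shown earlier, the names (f_specialized, i_res, i_y) are hypothetical, and the real peeled iteration repeats the whole loop body once rather than just unboxing the arguments.

    # A minimal sketch, assuming the running example from the paper: what
    # "type-specialize the loop and peel off one iteration" roughly amounts to,
    # written as plain Python rather than as traces.
    class BoxedInteger(object):
        def __init__(self, intval):
            self.intval = intval

    def f_specialized(res, y):
        # entry bridge (roughly the peeled iteration): check the classes once
        # and unbox the arguments; in the real system a failing check would
        # fall back to the interpreter instead of raising.
        assert isinstance(res, BoxedInteger) and isinstance(y, BoxedInteger)
        i_res, i_y = res.intval, y.intval
        # specialized loop: plain integer arithmetic, no allocation per iteration
        while True:
            i_res = (i_res + i_y) - 100   # res.add(y).add(BoxedInteger(-100))
            i_y = i_y - 1                 # y.add(BoxedInteger(-1))
            if not i_y > 0:               # y.is_positive()
                break
        return BoxedInteger(i_res)        # box again only when leaving the loop

The peeled iteration thus plays the role of the bridge from the interpreter, which only has boxed instances available, into the specialized loop that works on plain integers.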
From cfbolz at codespeak.net Thu Sep 23 11:49:39 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 23 Sep 2010 11:49:39 +0200 (CEST) Subject: [pypy-svn] r77292 - pypy/extradoc/talk/pepm2011/figures Message-ID: <20100923094939.93DAE282BFB@codespeak.net> Author: cfbolz Date: Thu Sep 23 11:49:38 2010 New Revision: 77292 Removed: pypy/extradoc/talk/pepm2011/figures/obj-lifetime.png pypy/extradoc/talk/pepm2011/figures/step1.png pypy/extradoc/talk/pepm2011/figures/step2.png pypy/extradoc/talk/pepm2011/figures/step3.png pypy/extradoc/talk/pepm2011/figures/step4.png Log: kill pngs From antocuni at codespeak.net Thu Sep 23 11:58:59 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 23 Sep 2010 11:58:59 +0200 (CEST) Subject: [pypy-svn] r77293 - pypy/branch/jitffi/pypy/rlib/test Message-ID: <20100923095859.64CFE282B90@codespeak.net> Author: antocuni Date: Thu Sep 23 11:58:58 2010 New Revision: 77293 Removed: pypy/branch/jitffi/pypy/rlib/test/test_jitffi.py Modified: pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Log: kill test_jitffi, move the test to test_libffi. Skip for now the former libffi tests, as they were broken during the refactoring Modified: pypy/branch/jitffi/pypy/rlib/test/test_libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/test/test_libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/test/test_libffi.py Thu Sep 23 11:58:58 2010 @@ -26,17 +26,19 @@ else: return 'libm.so' - -class TestLibffi(object): - def setup_method(self, meth): - ALLOCATED.clear() - +class BaseFfiTest(object): def get_libc(self): return CDLL(get_libc_name()) def get_libm(self): return CDLL(get_libm_name(sys.platform)) + +class TestCLibffi(BaseFfiTest): + def setup_method(self, meth): + py.test.skip("broken during the refactoring, FIXME") + ALLOCATED.clear() + def test_library_open(self): lib = self.get_libc() del lib @@ -421,3 +423,19 @@ print hex(handle) assert handle != 0 assert handle % 0x1000 == 0 + + +class TestLibffi(BaseFfiTest): + """ + Test the new JIT-friendly interface to libffi + """ + + def test_call_argchain(self): + libm = self.get_libm() + pow_ptr = libm.getpointer('pow', [ffi_type_double, ffi_type_double], + ffi_type_double) + pow = Func(pow_ptr) + argchain = FloatArg(2.0) + argchain.next = FloatArg(3.0) + res = pow.call(argchain, rffi.DOUBLE) + assert res == 8.0 From cfbolz at codespeak.net Thu Sep 23 12:10:44 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Thu, 23 Sep 2010 12:10:44 +0200 (CEST) Subject: [pypy-svn] r77295 - pypy/extradoc/talk/pepm2011 Message-ID: <20100923101044.71B1B282B90@codespeak.net> Author: cfbolz Date: Thu Sep 23 12:10:43 2010 New Revision: 77295 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: move stuff around a bit Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Thu Sep 23 12:10:43 2010 @@ -140,6 +140,49 @@ \subsection{PyPy} \label{sub:PyPy} +\section{Object Lifetimes in a Tracing JIT} +\label{sec:lifetimes} + +% section Object Lifetimes in a Tracing JIT (end) + +To understand the problems that this paper is trying to solve some more, we +first need to understand various cases of object lifetimes that can occur in a +tracing JIT compiler. 
+ +\begin{figure} +\includegraphics{figures/obj-lifetime.pdf} + +\caption{Object Lifetimes in a Trace} +\label{fig:lifetimes} +\end{figure} + +The figure shows a trace before optimization, together with the lifetime of +various kinds of objects created in the trace. It is executed from top to +bottom. At the bottom, a jump is used to execute the same loop another time. +For clarity, the figure shows two iterations of the loop. +The loop is executed until one of the guards in the trace fails, and the +execution is aborted. + +Some of the operations within this trace are \texttt{new} operations, which each create a +new instance of some class. These instances are used for a while, e.g. by +calling methods on them, reading and writing their fields. Some of these +instances escape, which means that they are stored in some globally accessible +place or are passed into a function. + +Together with the \texttt{new} operations, the figure shows the lifetimes of the +created objects. Objects in category 1 live for a while, and are then just not +used any more. The creation of these objects is removed by the +optimization described in the last section. + +Objects in category 2 live for a while and then escape. The optimization of the +last section deals with them too: the \texttt{new} that creates them and +the field accesses are deferred, until the point where the object escapes. + +The objects in category 3 and 4 are in principle like the objects in category 1 +and 2. They are created, live for a while, but are then passed as an argument +to the \texttt{jump} operation. In the next iteration they can either die (category +3) or escape (category 4). + \section{Escape Analysis in a Tracing JIT} \label{sec:Escape Analysis in a Tracing JIT} @@ -426,49 +469,11 @@ \section{Escape Analysis Across Loop Boundaries} \label{sec:crossloop} -This section is a bit -science-fictiony. The algorithm that PyPy currently uses is significantly more -complex and much harder than the one that is described here. The resulting -behaviour is very similar, however, so we will use the simpler version (and we -might switch to that at some point in the actual implementation). - In the last section we described how escape analysis can be used to remove many of the allocations of short-lived objects and many of the type dispatches that are present in a non-optimized trace. In this section we will improve the optimization to also handle more cases. -To understand some more what the optimization described in the last section -can achieve, look at the following figure: - -\includegraphics{figures/obj-lifetime.pdf} - -The figure shows a trace before optimization, together with the lifetime of -various kinds of objects created in the trace. It is executed from top to -bottom. At the bottom, a jump is used to execute the same loop another time. -For clarity, the figure shows two iterations of the loop. -The loop is executed until one of the guards in the trace fails, and the -execution is aborted. - -Some of the operations within this trace are \texttt{new} operations, which each create a -new instance of some class. These instances are used for a while, e.g. by -calling methods on them, reading and writing their fields. Some of these -instances escape, which means that they are stored in some globally accessible -place or are passed into a function. - -Together with the \texttt{new} operations, the figure shows the lifetimes of the -created objects. Objects in category 1 live for a while, and are then just not -used any more. 
The creation of these objects is removed by the -optimization described in the last section. - -Objects in category 2 live for a while and then escape. The optimization of the -last section deals with them too: the \texttt{new} that creates them and -the field accesses are deferred, until the point where the object escapes. - -The objects in category 3 and 4 are in principle like the objects in category 1 -and 2. They are created, live for a while, but are then passed as an argument -to the \texttt{jump} operation. In the next iteration they can either die (category -3) or escape (category 4). - The optimization of the last section considered the passing of an object along a jump to be equivalent to escaping. It was thus treating objects in category 3 and 4 like those in category 2. @@ -483,6 +488,12 @@ \subsection{Optimizing Across the Jump} +\footnote{This section is a bit +science-fictiony. The algorithm that PyPy currently uses is significantly more +complex and much harder than the one that is described here. The resulting +behaviour is very similar, however, so we will use the simpler version (and we +might switch to that at some point in the actual implementation).} + Let's look at the final trace obtained in the last section for the example loop. The final trace was much better than the original one, because many allocations were removed from it. However, it also still contained allocations: From arigo at codespeak.net Thu Sep 23 13:42:59 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 13:42:59 +0200 (CEST) Subject: [pypy-svn] r77296 - in pypy/branch/jit-str/pypy/jit: codewriter codewriter/test metainterp metainterp/optimizeopt Message-ID: <20100923114259.20391282B90@codespeak.net> Author: arigo Date: Thu Sep 23 13:42:57 2010 New Revision: 77296 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py Log: Work towards making str_concat virtuals. Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Thu Sep 23 13:42:57 2010 @@ -121,3 +121,15 @@ def analyze_simple_operation(self, op): return op.opname in ('jit_force_virtualizable', 'jit_force_virtual') + +# ____________________________________________________________ + +_callinfo_for_oopspec = {} + +def callinfo_for_oopspec(oopspecindex): + """A memo function that returns the calldescr and the function + address (as an int) of one of the OS_XYZ functions defined above. + Don't use this if there might be several implementations of the same + OS_XYZ specialized by type, e.g. 
OS_ARRAYCOPY.""" + return _callinfo_for_oopspec.get(oopspecindex, (None, 0)) +callinfo_for_oopspec._annspecialcase_ = 'specialize:memo' Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Thu Sep 23 13:42:57 2010 @@ -1,12 +1,12 @@ import py, sys -from pypy.rpython.lltypesystem import lltype, rstr, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass from pypy.rpython import rlist from pypy.jit.metainterp.history import getkind from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.objspace.flow.model import Block, Link, c_last_exception from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker -from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.codewriter.effectinfo import EffectInfo, _callinfo_for_oopspec from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem from pypy.rlib import objectmodel @@ -1029,6 +1029,8 @@ def _handle_oopspec_call(self, op, args, oopspecindex): calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + func = heaptracker.adr2int(llmemory.cast_ptr_to_adr(op.args[0].value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func op1 = self.rewrite_call(op, 'residual_call', [op.args[0], calldescr], args=args) Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Thu Sep 23 13:42:57 2010 @@ -718,6 +718,11 @@ assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT assert op1.args[2] == ListOfKind('ref', [v1, v2]) assert op1.result == v3 + # + # check the callinfo_for_oopspec + got = effectinfo.callinfo_for_oopspec(effectinfo.EffectInfo.OS_UNI_CONCAT) + assert got[0] == op1.args[1] # the calldescr + assert heaptracker.int2adr(got[1]) == llmemory.cast_ptr_to_adr(func) def test_str_stringslice_startonly(): # test that the oopspec is present and correctly transformed Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 23 13:42:57 2010 @@ -306,6 +306,9 @@ # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + def send_extra_operation(self, op): + self.first_optimization.propagate_forward(op) + def propagate_forward(self, op): self.producer[op.result] = op opnum = op.opnum Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 23 13:42:57 2010 @@ -7,7 +7,7 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * -from pypy.jit.codewriter.effectinfo import EffectInfo 
+from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec from pypy.rlib.unroll import unrolling_iterable @@ -196,15 +196,23 @@ return modifier.make_varray(self.arraydescr) -class VStringPlainValue(AbstractVirtualValue): +class VAbstractStringValue(AbstractVirtualValue): + + def getlengthvalue(self): + raise NotImplementedError + + +class VStringPlainValue(VAbstractStringValue): """A string built with newstr(const).""" - def __init__(self, optimizer, size, keybox, source_op=None): - AbstractVirtualValue.__init__(self, optimizer, keybox, source_op) + def setup(self, size): self._chars = [CVAL_ZERO] * size + self._lengthvalue = None # cache only - def getlength(self): - return len(self._chars) + def getlengthvalue(self): + if self._lengthvalue is None: + self._lengthvalue = ConstantValue(ConstInt(len(self._chars))) + return self._lengthvalue def getitem(self, index): return self._chars[index] @@ -232,27 +240,40 @@ value.get_args_for_fail(modifier) def _make_virtual(self, modifier): - return modifier.make_vstrconcat() + return modifier.make_vstrplain() -class VStringConcatValue(AbstractVirtualValue): +class VStringConcatValue(VAbstractStringValue): """The concatenation of two other strings.""" - def __init__(self, optimizer, keybox): - AbstractVirtualValue.__init__(self, optimizer, keybox) - self._left = None - self._right = None + def setup(self, left, right, length): + self.left = left + self.right = right + self.lengthvalue = length + + def getlengthvalue(self): + return self.lengthvalue def _really_force(self): - xxx + assert self.source_op is not None + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) + leftbox = self.left.force_box() + rightbox = self.right.force_box() + self.box = box = self.source_op.result + newoperations = self.optimizer.newoperations + newoperations.append(ResOperation(rop.CALL, + [ConstInt(func), leftbox, rightbox], + box, calldescr)) def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): - leftbox = self._left.get_key_box() - rightbox = self._right.get_key_box() + # we don't store the lengthvalue in guards, because the + # guard-failed code starts with a regular STR_CONCAT again + leftbox = self.left.get_key_box() + rightbox = self.right.get_key_box() modifier.register_virtual_fields(self.keybox, [leftbox, rightbox]) - self._left.get_args_for_fail(modifier) - self._right.get_args_for_fail(modifier) + self.left.get_args_for_fail(modifier) + self.right.get_args_for_fail(modifier) def _make_virtual(self, modifier): return modifier.make_vstrconcat() @@ -346,13 +367,13 @@ self.make_equal_to(box, vvalue) return vvalue - def make_vstring_plain(self, length, box, source_op=None): - vvalue = VStringPlainValue(self.optimizer, length, box, source_op) + def make_vstring_plain(self, box, source_op=None): + vvalue = VStringPlainValue(self.optimizer, box, source_op) self.make_equal_to(box, vvalue) return vvalue - def make_vstring_concat(self, box): - vvalue = VStringConcatValue(self.optimizer, box) + def make_vstring_concat(self, box, source_op=None): + vvalue = VStringConcatValue(self.optimizer, box, source_op) self.make_equal_to(box, vvalue) return vvalue @@ -398,14 +419,13 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info # op.args[1] should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, - descr = vrefinfo.descr_forced) - self.optimize_SETFIELD_GC(op1) + seo = self.optimizer.send_extra_operation 
+ seo(ResOperation(rop.SETFIELD_GC, op.args, None, + descr = vrefinfo.descr_forced)) # - set 'virtual_token' to TOKEN_NONE args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] - op1 = ResOperation(rop.SETFIELD_GC, args, None, - descr = vrefinfo.descr_virtual_token) - self.optimize_SETFIELD_GC(op1) + seo(ResOperation(rop.SETFIELD_GC, args, None, + descr = vrefinfo.descr_virtual_token)) # Note that in some cases the virtual in op.args[1] has been forced # already. This is fine. In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will @@ -535,7 +555,8 @@ # build a new one with the ConstInt argument if not isinstance(op.args[0], ConstInt): op = ResOperation(rop.NEWSTR, [length_box], op.result) - self.make_vstring_plain(length_box.getint(), op.result, op) + vvalue = self.make_vstring_plain(op.result, op) + vvalue.setup(length_box.getint()) else: self.emit_operation(op) @@ -563,15 +584,23 @@ def optimize_STRLEN(self, op): value = self.getvalue(op.args[0]) if isinstance(value, VStringPlainValue): # even if no longer virtual - self.make_constant_int(op.result, value.getlength()) + self.make_equal_to(op.result, value.getlengthvalue()) else: value.ensure_nonnull() self.emit_operation(op) def opt_call_oopspec_STR_CONCAT(self, op): - value = self.make_vstring_concat(op.result) - value._left = self.getvalue(op.args[1]) - value._right = self.getvalue(op.args[2]) + lengthbox = BoxInt() + len1box = BoxInt() + len2box = BoxInt() + seo = self.optimizer.send_extra_operation + seo(ResOperation(rop.STRLEN, [op.args[1]], len1box)) + seo(ResOperation(rop.STRLEN, [op.args[2]], len2box)) + seo(ResOperation(rop.INT_ADD, [len1box, len2box], lengthbox)) + value = self.make_vstring_concat(op.result, op) + value.setup(left = self.getvalue(op.args[1]), + right = self.getvalue(op.args[2]), + length = self.getvalue(lengthbox)) return True def propagate_forward(self, op): Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Thu Sep 23 13:42:57 2010 @@ -4,6 +4,7 @@ from pypy.jit.metainterp.history import INT, REF, FLOAT, HOLE from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import jitprof +from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rlib import rarithmetic from pypy.rlib.objectmodel import we_are_translated, specialize @@ -253,6 +254,9 @@ def make_varray(self, arraydescr): return VArrayInfo(arraydescr) + def make_vstrplain(self): + return VStrPlainInfo() + def make_vstrconcat(self): return VStrConcatInfo() @@ -489,14 +493,8 @@ for i in self.fieldnums: debug_print("\t\t", str(untag(i))) -class VStrConcatInfo(AbstractVirtualInfo): - """Stands for the string made out of the concatenation of all - fieldnums. Each fieldnum can be an integer (the ord() of a single - character) or a pointer (another string). XXX only integers implemented - """ - def __init__(self): - pass - #self.fieldnums = ... 
+class VStrPlainInfo(AbstractVirtualInfo): + """Stands for the string made out of the characters of all fieldnums.""" @specialize.argtype(1) def allocate(self, decoder): @@ -510,7 +508,29 @@ decoder.strsetitem(string, i, self.fieldnums[i]) def debug_prints(self): - debug_print("\tvstringinfo") + debug_print("\tvstrplaininfo length", len(self.fieldnums)) + + +class VStrConcatInfo(AbstractVirtualInfo): + """Stands for the string made out of the concatenation of two + other strings.""" + + @specialize.argtype(1) + def allocate(self, decoder): + # xxx for blackhole resuming, this will build all intermediate + # strings and throw them away immediately, which is a bit sub- + # efficient. Not sure we care. + left, right = self.fieldnums + return decoder.concat_strings(left, right) + + @specialize.argtype(1) + def setfields(self, decoder, string): + # we do everything in allocate(); no risk of circular data structure + # with strings. + pass + + def debug_prints(self): + debug_print("\tvstrconcatinfo") for i in self.fieldnums: debug_print("\t\t", str(untag(i))) @@ -651,7 +671,16 @@ arraydescr, ConstInt(length)) def allocate_string(self, length): - return self.metainterp.execute_and_record(rop.NEWSTR, ConstInt(length)) + return self.metainterp.execute_and_record(rop.NEWSTR, + None, ConstInt(length)) + + def concat_strings(self, str1num, str2num): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) + str1box = self.decode_box(str1num, REF) + str2box = self.decode_box(str2num, REF) + return self.metainterp.execute_and_record(rop.CALL, calldescr, + ConstInt(func), + str1box, str2box) def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): From arigo at codespeak.net Thu Sep 23 13:49:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 13:49:53 +0200 (CEST) Subject: [pypy-svn] r77297 - pypy/branch/jit-str/pypy/jit/codewriter Message-ID: <20100923114953.50748282B90@codespeak.net> Author: arigo Date: Thu Sep 23 13:49:51 2010 New Revision: 77297 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Log: Fix for tests. Raise AssertionError in translated tests if this is ever hit. Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Thu Sep 23 13:49:51 2010 @@ -126,10 +126,15 @@ _callinfo_for_oopspec = {} +def callinfo_for_oopspec_memo(oopspecindex): + return _callinfo_for_oopspec.get(oopspecindex, (None, 0)) +callinfo_for_oopspec_memo._annspecialcase_ = 'specialize:memo' + def callinfo_for_oopspec(oopspecindex): """A memo function that returns the calldescr and the function address (as an int) of one of the OS_XYZ functions defined above. Don't use this if there might be several implementations of the same OS_XYZ specialized by type, e.g. 
OS_ARRAYCOPY.""" - return _callinfo_for_oopspec.get(oopspecindex, (None, 0)) -callinfo_for_oopspec._annspecialcase_ = 'specialize:memo' + calldescr, func = callinfo_for_oopspec_memo(oopspecindex) + assert calldescr is not None + return calldescr, func Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Thu Sep 23 13:49:51 2010 @@ -1029,8 +1029,12 @@ def _handle_oopspec_call(self, op, args, oopspecindex): calldescr = self.callcontrol.getcalldescr(op, oopspecindex) - func = heaptracker.adr2int(llmemory.cast_ptr_to_adr(op.args[0].value)) - _callinfo_for_oopspec[oopspecindex] = calldescr, func + if isinstance(op.args[0].value, str): + pass # for tests only + else: + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(op.args[0].value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func op1 = self.rewrite_call(op, 'residual_call', [op.args[0], calldescr], args=args) From arigo at codespeak.net Thu Sep 23 14:11:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 14:11:15 +0200 (CEST) Subject: [pypy-svn] r77298 - in pypy/branch/jit-str/pypy/jit: codewriter metainterp metainterp/test Message-ID: <20100923121115.33C35282B90@codespeak.net> Author: arigo Date: Thu Sep 23 14:11:13 2010 New Revision: 77298 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Log: Fixes. Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Thu Sep 23 14:11:13 2010 @@ -126,15 +126,24 @@ _callinfo_for_oopspec = {} -def callinfo_for_oopspec_memo(oopspecindex): +def _callinfo_for_oopspec_memo(oopspecindex): return _callinfo_for_oopspec.get(oopspecindex, (None, 0)) -callinfo_for_oopspec_memo._annspecialcase_ = 'specialize:memo' +_callinfo_for_oopspec_memo._annspecialcase_ = 'specialize:memo' def callinfo_for_oopspec(oopspecindex): """A memo function that returns the calldescr and the function address (as an int) of one of the OS_XYZ functions defined above. Don't use this if there might be several implementations of the same OS_XYZ specialized by type, e.g. 
OS_ARRAYCOPY.""" - calldescr, func = callinfo_for_oopspec_memo(oopspecindex) + calldescr, func = _callinfo_for_oopspec_memo(oopspecindex) assert calldescr is not None return calldescr, func + +def funcptr_for_oopspec(oopspecindex): + """A memo function that returns a pointer to the function described + by OS_XYZ (as a real low-level function pointer).""" + from pypy.jit.codewriter import heaptracker + _, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0)) + funcadr = heaptracker.int2adr(func_as_int) + return funcadr.ptr +funcptr_for_oopspec._annspecialcase_ = 'specialize:memo' Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Thu Sep 23 14:11:13 2010 @@ -5,7 +5,8 @@ from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import jitprof from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec -from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.jit.codewriter.effectinfo import funcptr_for_oopspec +from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr from pypy.rlib import rarithmetic from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import have_debug_prints @@ -678,9 +679,8 @@ calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) str1box = self.decode_box(str1num, REF) str2box = self.decode_box(str2num, REF) - return self.metainterp.execute_and_record(rop.CALL, calldescr, - ConstInt(func), - str1box, str2box) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), str1box, str2box], calldescr) def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): @@ -902,6 +902,15 @@ def allocate_string(self, length): return self.cpu.bh_newstr(length) + def concat_strings(self, str1num, str2num): + str1 = self.decode_ref(str1num) + str2 = self.decode_ref(str2num) + str1 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str1) + str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str2) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_CONCAT) + result = funcptr(str1, str2) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + def setfield(self, descr, struct, fieldnum): if descr.is_pointer_field(): newvalue = self.decode_ref(fieldnum) Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Thu Sep 23 14:11:13 2010 @@ -155,16 +155,17 @@ newstr=0, strgetitem=1, strsetitem=0, strlen=0) def test_strconcat_pure(self): - for dochr in [chr, ]: #unichr]: + for somestr in ["abc", ]: #u"def"]: jitdriver = JitDriver(greens = [], reds = ['m', 'n']) @dont_look_inside def escape(x): pass + mylist = [somestr+str(i) for i in range(10)] def f(n, m): while m >= 0: jitdriver.can_enter_jit(m=m, n=n) jitdriver.jit_merge_point(m=m, n=n) - s = dochr(n) + dochr(m) + s = mylist[n] + mylist[m] if m > 100: escape(s) m -= 1 @@ -174,6 +175,46 @@ newunicode=0, unicodesetitem=0, call=0, call_pure=0) + def test_strconcat_escape(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(10)] + def f(n, m): + 
while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0, + call=2, call_pure=0) # ll_strconcat, escape + + def test_strconcat_guard_fail(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(12)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + if m & 1: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 10]) + self.check_loops(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0) + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" From antocuni at codespeak.net Thu Sep 23 15:49:02 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 23 Sep 2010 15:49:02 +0200 (CEST) Subject: [pypy-svn] r77299 - in pypy/branch/jitffi/pypy/jit/metainterp: optimizeopt test Message-ID: <20100923134902.C87DA282B90@codespeak.net> Author: antocuni Date: Thu Sep 23 15:49:00 2010 New Revision: 77299 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Log: first passing test about rewriting a series of push_arg+call into a call_c operation. Not sure it's exactly what we want, though :-) Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Thu Sep 23 15:49:00 2010 @@ -1,22 +1,55 @@ - +from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall -from pypy.rlib.objectmodel import we_are_translated -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization class OptCCall(Optimization): - def _dissect_virtual_value(self, args_so_far, value): - pass + + def __init__(self): + self.func_args = {} + + def get_oopspec(self, funcbox): + # XXX: not RPython at all, just a hack while waiting to have an + # "official" way to know if and which oopspec we are calling + funcname = str(funcbox) + if '_libffi_prepare_call' in funcname: + return 'prepare_call' + elif '_libffi_push_arg' in funcname: + return 'push_arg' + elif '_libffi_call' in funcname: + return 'call' + return None def optimize_CALL(self, op): + funcbox = op.args[0] + oopspec = self.get_oopspec(funcbox) + if oopspec == 'prepare_call': + self.do_prepare_call(op) + return + elif oopspec == 'push_arg': + self.do_push_arg(op) + return + elif oopspec == 'call': + op = self.do_call(op) self.emit_operation(op) - return - args = [] - v = self.getvalue(op.args[2]) - while v: - v = self._dissect_virtual_value(args, v) - import pdb - pdb.set_trace() - return self.optimize_default(op) + + def do_prepare_call(self, op): + funcbox = op.args[1] + assert funcbox not in self.func_args + self.func_args[funcbox] = [] + + def do_push_arg(self, op): + funcbox = op.args[1] + self.func_args[funcbox].append(op) + + def do_call(self, op): + funcbox = op.args[1] + funcsymbox = op.args[2] + arglist = [funcsymbox] + for push_op in self.func_args[funcbox]: + arglist.append(push_op.args[2]) + newop = ResOperation(rop.CALL_C, arglist, op.result, 
None) + del self.func_args[funcbox] + return newop def propagate_forward(self, op): opnum = op.opnum Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 23 15:49:00 2010 @@ -3818,22 +3818,24 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - def test_arg_pushing(self): + def test_ffi_call(self): + # XXX: do we want to promote p0 and get rid of the getfield? ops = """ - [p1, i0] - p2 = new_with_vtable(ConstClass(IntArgVTable)) - p3 = new_with_vtable(ConstClass(IntArgVTable)) - setfield_gc(p3, Const(1), descr=int_arg_intval) - setfield_gc(p2, p3, descr=int_arg_next) - setfield_gc(p2, i0, descr=int_arg_intval) - i4 = call(13, p1, p2, descr=nonwritedescr) - guard_no_exception() [] + [p0, i1, f2] + call("_libffi_prepare_call", p0, descr=plaincalldescr) + call("_libffi_push_arg_Signed", p0, i1, descr=plaincalldescr) + call("_libffi_push_arg_Float", p0, f2, descr=plaincalldescr) + p3 = getfield_gc_pure(p0) # funcsym + i4 = call("_libffi_call", p0, p3, descr=plaincalldescr) + jump(p0, i4, f2) """ expected = """ - [p1, i0] - call_c( + [p0, i1, f2] + p3 = getfield_gc_pure(p0) + i4 = call_c(p3, i1, f2) + jump(p0, i4, f2) """ - self.optimize_loop(ops, 'Not, Not', expected) + self.optimize_loop(ops, 'Not, Not, Not', expected) ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): From arigo at codespeak.net Thu Sep 23 15:54:28 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 15:54:28 +0200 (CEST) Subject: [pypy-svn] r77300 - in pypy/branch/jit-str/pypy/jit: backend/llgraph metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100923135428.CFC7C282C18@codespeak.net> Author: arigo Date: Thu Sep 23 15:54:26 2010 New Revision: 77300 Modified: pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py pypy/branch/jit-str/pypy/jit/metainterp/executor.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Log: Finish optimizing str_concats. It builds the final string at once from parts that can be substrings or single characters. See the tests in test_optimizeopt. 
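To make the log message above concrete: when a virtual string concatenation is finally forced, the optimizer computes the total length, emits a single NEWSTR, and then copies each part into place (a whole substring via copystrcontent, a single character via STRSETITEM), threading the current offset along much like the offsetbox in the virtualize.py changes below. The following plain-Python sketch is illustrative only and not optimizer code; force_concat and the use of bytearray are hypothetical stand-ins for the real operations on boxes.

    # Minimal sketch of "build the final string at once from parts":
    # one allocation for the whole result, then one copy per part.
    def force_concat(parts):
        # parts: byte strings, each either a substring or a single character
        total = sum(len(p) for p in parts)           # roughly STRLEN + INT_ADD
        result = bytearray(total)                    # roughly a single NEWSTR
        offset = 0
        for part in parts:
            result[offset:offset + len(part)] = part # roughly copystrcontent
            offset += len(part)                      # offset threaded like offsetbox
        return bytes(result)

    assert force_concat([b"abc", b"0", b"def7"]) == b"abc0def7"

No intermediate string is ever allocated for the inner concatenations; only the final result is built.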
Modified: pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py Thu Sep 23 15:54:26 2010 @@ -1382,6 +1382,16 @@ uni = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) uni.chars[index] = unichr(newvalue) +def do_copystrcontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ---------- call ---------- _call_args_i = [] Modified: pypy/branch/jit-str/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/executor.py Thu Sep 23 15:54:26 2010 @@ -2,7 +2,7 @@ """ import py -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import lltype, llmemory, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask @@ -203,6 +203,24 @@ def do_same_as(cpu, _, box): return box.clonebox() +def do_copystrcontent(cpu, _, srcbox, dstbox, + srcstartbox, dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.STR)) + dst = dstbox.getptr(lltype.Ptr(rstr.STR)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(cpu, _, srcbox, dstbox, + srcstartbox, dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.UNICODE)) + dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ____________________________________________________________ ##def do_force_token(cpu): Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 23 15:54:26 2010 @@ -126,6 +126,16 @@ def setitem(self, index, value): raise NotImplementedError + def getstrlen(self, newoperations): + box = self.force_box() + lengthbox = BoxInt() + newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) + return lengthbox + + def string_copy_parts(self, *args): + from pypy.jit.metainterp.optimizeopt import virtualize + return virtualize.default_string_copy_parts(self, *args) + class ConstantValue(OptValue): def __init__(self, box): self.make_constant(box) Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Thu 
Sep 23 15:54:26 2010 @@ -7,7 +7,7 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * -from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.rlib.unroll import unrolling_iterable @@ -198,8 +198,13 @@ class VAbstractStringValue(AbstractVirtualValue): - def getlengthvalue(self): - raise NotImplementedError + def _really_force(self): + assert self.source_op is not None + self.box = box = self.source_op.result + newoperations = self.optimizer.newoperations + lengthbox = self.getstrlen(newoperations) + newoperations.append(ResOperation(rop.NEWSTR, [lengthbox], box)) + self.string_copy_parts(newoperations, box, CONST_0) class VStringPlainValue(VAbstractStringValue): @@ -207,12 +212,12 @@ def setup(self, size): self._chars = [CVAL_ZERO] * size - self._lengthvalue = None # cache only + self._lengthbox = None # cache only - def getlengthvalue(self): - if self._lengthvalue is None: - self._lengthvalue = ConstantValue(ConstInt(len(self._chars))) - return self._lengthvalue + def getstrlen(self, _): + if self._lengthbox is None: + self._lengthbox = ConstInt(len(self._chars)) + return self._lengthbox def getitem(self, index): return self._chars[index] @@ -221,16 +226,14 @@ assert isinstance(charvalue, OptValue) self._chars[index] = charvalue - def _really_force(self): - assert self.source_op is not None - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result + def string_copy_parts(self, newoperations, targetbox, offsetbox): for i in range(len(self._chars)): charbox = self._chars[i].force_box() - op = ResOperation(rop.STRSETITEM, - [box, ConstInt(i), charbox], None) - newoperations.append(op) + newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) + offsetbox = _int_add(newoperations, offsetbox, CONST_1) + return offsetbox def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): @@ -246,24 +249,20 @@ class VStringConcatValue(VAbstractStringValue): """The concatenation of two other strings.""" - def setup(self, left, right, length): + def setup(self, left, right, lengthbox): self.left = left self.right = right - self.lengthvalue = length + self.lengthbox = lengthbox - def getlengthvalue(self): - return self.lengthvalue + def getstrlen(self, _): + return self.lengthbox - def _really_force(self): - assert self.source_op is not None - calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) - leftbox = self.left.force_box() - rightbox = self.right.force_box() - self.box = box = self.source_op.result - newoperations = self.optimizer.newoperations - newoperations.append(ResOperation(rop.CALL, - [ConstInt(func), leftbox, rightbox], - box, calldescr)) + def string_copy_parts(self, newoperations, targetbox, offsetbox): + offsetbox = self.left.string_copy_parts(newoperations, targetbox, + offsetbox) + offsetbox = self.right.string_copy_parts(newoperations, targetbox, + offsetbox) + return offsetbox def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): @@ -279,6 +278,32 @@ return modifier.make_vstrconcat() +def default_string_copy_parts(srcvalue, newoperations, targetbox, offsetbox): + # Copies the pointer-to-string 'srcvalue' into the target string + # given by 'targetbox', at the specified 
offset. Returns the offset + # at the end of the copy. + srcbox = srcvalue.force_box() + lengthbox = BoxInt() + newoperations.append(ResOperation(rop.STRLEN, [srcbox], lengthbox)) + nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + newoperations.append(ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, + CONST_0, offsetbox, + lengthbox], None)) + return nextoffsetbox + +def _int_add(newoperations, box1, box2): + if isinstance(box1, ConstInt): + if box1.value == 0: + return box2 + if isinstance(box2, ConstInt): + return ConstInt(box1.value + box2.value) + elif isinstance(box2, ConstInt) and box2.value == 0: + return box1 + resbox = BoxInt() + newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + return resbox + + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -584,23 +609,23 @@ def optimize_STRLEN(self, op): value = self.getvalue(op.args[0]) if isinstance(value, VStringPlainValue): # even if no longer virtual - self.make_equal_to(op.result, value.getlengthvalue()) + lengthbox = value.getstrlen(self.optimizer.newoperations) + self.make_equal_to(op.result, self.getvalue(lengthbox)) else: value.ensure_nonnull() self.emit_operation(op) def opt_call_oopspec_STR_CONCAT(self, op): - lengthbox = BoxInt() - len1box = BoxInt() - len2box = BoxInt() - seo = self.optimizer.send_extra_operation - seo(ResOperation(rop.STRLEN, [op.args[1]], len1box)) - seo(ResOperation(rop.STRLEN, [op.args[2]], len2box)) - seo(ResOperation(rop.INT_ADD, [len1box, len2box], lengthbox)) + vleft = self.getvalue(op.args[1]) + vright = self.getvalue(op.args[2]) + newoperations = self.optimizer.newoperations + len1box = vleft.getstrlen(newoperations) + len2box = vright.getstrlen(newoperations) + lengthbox = _int_add(newoperations, len1box, len2box) value = self.make_vstring_concat(op.result, op) value.setup(left = self.getvalue(op.args[1]), right = self.getvalue(op.args[2]), - length = self.getvalue(lengthbox)) + lengthbox = lengthbox) return True def propagate_forward(self, op): Modified: pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resoperation.py Thu Sep 23 15:54:26 2010 @@ -221,6 +221,8 @@ 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend + 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length + 'COPYUNICODECONTENT/5', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- 'CALL', Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Thu Sep 23 15:54:26 2010 @@ -117,6 +117,8 @@ EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY)) + strconcatdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT)) class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: 
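
The _int_add() helper above is what keeps the copy offsets cheap: it folds additions whose
operands are known constants and only emits a real INT_ADD otherwise. A small stand-alone
model of that folding (plain ints stand in for ConstInt boxes, strings for runtime boxes;
the names here are made up for illustration):

    def int_add_folded(a, b):
        # a, b: an int means "known ConstInt", a string names a runtime box
        if isinstance(a, int):
            if a == 0:
                return b                       # 0 + x  ->  x
            if isinstance(b, int):
                return a + b                   # const + const folds away
        elif isinstance(b, int) and b == 0:
            return a                           # x + 0  ->  x
        return "int_add(%s, %s)" % (a, b)      # otherwise a real INT_ADD is emitted

    assert int_add_folded(0, "i4") == "i4"
    assert int_add_folded(2, 3) == 5
    assert int_add_folded("i4", 0) == "i4"

This is why the expected traces in the tests below start copying at a literal offset 0, and
only the later offsets show up as int_add operations.
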
pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 23 15:54:26 2010 @@ -3891,7 +3891,130 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_newstr_1(self): + ops = """ + [i0] + p1 = newstr(1) + strsetitem(p1, 0, i0) + i1 = strgetitem(p1, 0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_newstr_2(self): + ops = """ + [i0, i1] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + i2 = strgetitem(p1, 1) + i3 = strgetitem(p1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + jump(i1, i0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_concat_1(self): + ops = """ + [p1, p2] + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(p2, p3) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p3 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p3, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p3, 0, i4, i5) + jump(p2, p3) + """ + self.optimize_loop(ops, 'Not, Not', expected) + def test_concat_vstr2_str(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(2, i2) + p3 = newstr(i3) + strsetitem(p3, 0, i0) + strsetitem(p3, 1, i1) + i4 = strlen(p2) + i5 = int_add(2, i4) # will be killed by the backend + copystrcontent(p2, p3, 0, 2, i4) + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_concat_str_vstr2(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p2, p1, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(i2, 2) + p3 = newstr(i3) + i4 = strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, i0) + i5 = int_add(i4, 1) + strsetitem(p3, i5, i1) + i6 = int_add(i5, 1) # will be killed by the backend + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_concat_str_str_str(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + p5 = call(0, p4, p3, descr=strconcatdescr) + jump(p2, p3, p5) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i12 = int_add(i1, i2) + i3 = strlen(p3) + i123 = int_add(i12, i3) + p5 = newstr(i123) + i1b = strlen(p1) + copystrcontent(p1, p5, 0, 0, i1b) + i2b = strlen(p2) + i12b = int_add(i1b, i2b) + copystrcontent(p2, p5, 0, i1b, i2b) + i3b = strlen(p3) + i123b = int_add(i12b, i3b) # will be killed by the backend + copystrcontent(p3, p5, 0, i12b, i3b) + jump(p2, p3, p5) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Thu Sep 23 15:54:26 2010 @@ -175,25 +175,94 @@ newunicode=0, unicodesetitem=0, call=0, call_pure=0) - def 
test_strconcat_escape(self): - for somestr in ["abc", ]: #u"def"]: - jitdriver = JitDriver(greens = [], reds = ['m', 'n']) - @dont_look_inside - def escape(x): - pass - mylist = [somestr+str(i) for i in range(10)] - def f(n, m): - while m >= 0: - jitdriver.can_enter_jit(m=m, n=n) - jitdriver.jit_merge_point(m=m, n=n) - s = mylist[n] + mylist[m] - escape(s) - m -= 1 - return 42 - self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, strsetitem=0, - newunicode=0, unicodesetitem=0, - call=2, call_pure=0) # ll_strconcat, escape + def test_strconcat_escape_str_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, + call=1, call_pure=0) # escape def test_strconcat_guard_fail(self): for somestr in ["abc", ]: #u"def"]: @@ -212,8 +281,6 @@ m -= 1 return 42 self.meta_interp(f, [6, 10]) - self.check_loops(newstr=0, strsetitem=0, - newunicode=0, unicodesetitem=0) class TestOOtype(StringTests, OOJitMixin): From hakanardo at codespeak.net Thu Sep 23 16:46:10 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 23 Sep 2010 16:46:10 +0200 (CEST) Subject: [pypy-svn] r77301 - in pypy/branch/jit-loop-invaraints/pypy/jit/metainterp: optimizeopt test Message-ID: <20100923144610.8EDD0282B90@codespeak.net> Author: hakanardo Date: Thu Sep 23 16:46:08 2010 New Revision: 77301 
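
The jit-str changes above no longer leave a residual call to ll_strconcat: string_copy_parts()
turns a forced concatenation into NEWSTR followed by COPYSTRCONTENT operations (compare
test_concat_1). A minimal pure-Python model of what that emitted sequence computes; the helper
name and the list-of-characters representation are only for illustration:

    def copystrcontent(src, dst, srcstart, dststart, length):
        # same effect as rstr.copy_string_contents() behind the new
        # COPYSTRCONTENT operation, but on plain Python character lists
        for i in range(length):
            dst[dststart + i] = src[srcstart + i]

    a, b = "foo", "bar"
    i1, i2 = len(a), len(b)          # i1 = strlen(a); i2 = strlen(b)
    s = [None] * (i1 + i2)           # i3 = int_add(i1, i2); p3 = newstr(i3)
    copystrcontent(list(a), s, 0, 0, i1)
    copystrcontent(list(b), s, 0, i1, i2)
    assert "".join(s) == "foobar"
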
Modified:
   pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py
   pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py
   pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py
Log:
Moving guards requires updating fail_args as well. Disabling for now.

Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py
==============================================================================
--- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py	(original)
+++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py	Thu Sep 23 16:46:08 2010
@@ -42,18 +42,17 @@
             jmp.descr = loop.token
             preamble.operations.append(jmp)

-        elif (op.is_always_pure() or op.is_foldable_guard() or
-              op.is_ovf()):
+        elif (op.is_always_pure()):# or op.is_foldable_guard() or op.is_ovf()):
             if self.has_invariant_args(op):
                 self.emit_invariant(op)
                 return

-        elif op.is_guard_overflow():
-            prev_op = self.optimizer.loop.operations[self.optimizer.i - 1]
-            v = self.getvalue(prev_op.result)
-            if v.invariant:
-                self.emit_invariant(op)
-                return
+        #elif op.is_guard_overflow():
+        #    prev_op = self.optimizer.loop.operations[self.optimizer.i - 1]
+        #    v = self.getvalue(prev_op.result)
+        #    if v.invariant:
+        #        self.emit_invariant(op)
+        #        return

         self.emit_operation(op)

Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py	(original)
+++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py	Thu Sep 23 16:46:08 2010
@@ -342,10 +342,11 @@
         elif op.returns_bool_result():
             self.bool_boxes[self.getvalue(op.result)] = None
         if op.invariant:
+            op.invariant = False
             self.preamble.append(op)
         else:
             self.newoperations.append(op)
-
+
     def store_final_boxes_in_guard(self, op):
         ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
         descr = op.descr

Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py
==============================================================================
--- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py	(original)
+++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py	Thu Sep 23 16:46:08 2010
@@ -321,6 +321,25 @@
                            'int_mul': 1,
                            'jump': 2})

+    def test_loop_invariant_mul_ovf(self):
+        myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
+        def f(x, y):
+            res = 0
+            while y > 0:
+                myjitdriver.can_enter_jit(x=x, y=y, res=res)
+                myjitdriver.jit_merge_point(x=x, y=y, res=res)
+                b = y * 2
+                res += ovfcheck(x * x) + b
+                y -= 1
+            return res
+        res = self.meta_interp(f, [6, 7])
+        assert res == 308
+        self.check_loop_count(2)
+        self.check_loops({'guard_true': 1,
+                          'int_add': 1, 'int_sub': 1, 'int_gt': 1,
+                          'int_mul': 1,
+                          'jump': 2})
+
     def test_loop_invariant_intbox(self):
         myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
         class I:

From antocuni at codespeak.net  Thu Sep 23 16:53:39 2010
From: antocuni at codespeak.net (antocuni at codespeak.net)
Date: Thu, 23 Sep 2010 16:53:39 +0200 (CEST)
Subject: [pypy-svn] r77302 - in pypy/branch/jitffi/pypy: .
config doc/discussion interpreter jit/backend/cli jit/backend/llgraph jit/backend/llsupport jit/backend/llsupport/test jit/backend/llvm jit/backend/test jit/backend/x86 jit/backend/x86/test jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test jit/tl jit/tool module/__builtin__ module/__builtin__/test module/_ssl/test module/array module/array/benchmark module/array/test module/gc module/gc/test module/pypyjit/test module/select module/select/test module/sys rlib rlib/test rpython rpython/lltypesystem rpython/lltypesystem/test rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test rpython/numpy translator translator/c translator/c/gcc translator/c/src translator/c/test Message-ID: <20100923145339.3082C282B90@codespeak.net> Author: antocuni Date: Thu Sep 23 16:53:32 2010 New Revision: 77302 Added: pypy/branch/jitffi/pypy/jit/metainterp/test/test_resoperation.py - copied unchanged from r77301, pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py pypy/branch/jitffi/pypy/module/gc/app_referents.py - copied unchanged from r77301, pypy/trunk/pypy/module/gc/app_referents.py pypy/branch/jitffi/pypy/module/gc/referents.py - copied unchanged from r77301, pypy/trunk/pypy/module/gc/referents.py pypy/branch/jitffi/pypy/module/gc/test/test_app_referents.py - copied unchanged from r77301, pypy/trunk/pypy/module/gc/test/test_app_referents.py pypy/branch/jitffi/pypy/module/gc/test/test_referents.py - copied unchanged from r77301, pypy/trunk/pypy/module/gc/test/test_referents.py pypy/branch/jitffi/pypy/rpython/memory/gc/inspect.py - copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/inspect.py pypy/branch/jitffi/pypy/rpython/memory/gc/minimark.py - copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/branch/jitffi/pypy/rpython/memory/gc/minimarkpage.py - copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_minimark.py - copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_minimarkpage.py - copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py Removed: pypy/branch/jitffi/pypy/rpython/numpy/ Modified: pypy/branch/jitffi/pypy/ (props changed) pypy/branch/jitffi/pypy/config/translationoption.py pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt pypy/branch/jitffi/pypy/interpreter/baseobjspace.py pypy/branch/jitffi/pypy/jit/backend/cli/method.py pypy/branch/jitffi/pypy/jit/backend/cli/runner.py pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py pypy/branch/jitffi/pypy/jit/backend/test/test_random.py pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py pypy/branch/jitffi/pypy/jit/metainterp/compile.py pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py 
pypy/branch/jitffi/pypy/jit/metainterp/history.py pypy/branch/jitffi/pypy/jit/metainterp/logger.py pypy/branch/jitffi/pypy/jit/metainterp/optimize.py pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ (props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intutils.py (props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py (contents, props changed) pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py pypy/branch/jitffi/pypy/jit/tool/showstats.py pypy/branch/jitffi/pypy/jit/tool/traceviewer.py pypy/branch/jitffi/pypy/module/__builtin__/functional.py pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py pypy/branch/jitffi/pypy/module/array/benchmark/Makefile (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/intimg.c (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/intimgtst.c (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/intimgtst.py (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/loop.c (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/sum.c (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/sumtst.c (props changed) pypy/branch/jitffi/pypy/module/array/benchmark/sumtst.py (props changed) pypy/branch/jitffi/pypy/module/array/interp_array.py pypy/branch/jitffi/pypy/module/array/test/test_array_old.py (props changed) pypy/branch/jitffi/pypy/module/gc/__init__.py pypy/branch/jitffi/pypy/module/gc/interp_gc.py pypy/branch/jitffi/pypy/module/gc/test/test_gc.py pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/jitffi/pypy/module/select/interp_select.py pypy/branch/jitffi/pypy/module/select/test/test_select.py pypy/branch/jitffi/pypy/module/sys/version.py pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py pypy/branch/jitffi/pypy/rlib/rarithmetic.py pypy/branch/jitffi/pypy/rlib/rgc.py pypy/branch/jitffi/pypy/rlib/rstring.py pypy/branch/jitffi/pypy/rlib/rwin32.py pypy/branch/jitffi/pypy/rlib/test/test_rgc.py pypy/branch/jitffi/pypy/rpython/llinterp.py pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py 
pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py pypy/branch/jitffi/pypy/rpython/memory/gc/base.py pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py pypy/branch/jitffi/pypy/rpython/memory/support.py pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/jitffi/pypy/rpython/rptr.py pypy/branch/jitffi/pypy/translator/c/funcgen.py pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py pypy/branch/jitffi/pypy/translator/c/genc.py pypy/branch/jitffi/pypy/translator/c/src/mem.h pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py pypy/branch/jitffi/pypy/translator/exceptiontransform.py Log: merge from trunk: svn merge svn+ssh://codespeak.net/svn/pypy/trunk/pypy -r77083:HEAD \n in particular, it merges the resoperation refactoring Modified: pypy/branch/jitffi/pypy/config/translationoption.py ============================================================================== --- pypy/branch/jitffi/pypy/config/translationoption.py (original) +++ pypy/branch/jitffi/pypy/config/translationoption.py Thu Sep 23 16:53:32 2010 @@ -52,7 +52,7 @@ # gc ChoiceOption("gc", "Garbage Collection Strategy", ["boehm", "ref", "marksweep", "semispace", "statistics", - "generation", "hybrid", "markcompact", "none"], + "generation", "hybrid", "markcompact", "minimark", "none"], "ref", requires={ "ref": [("translation.rweakref", False), # XXX ("translation.gctransformer", "ref")], @@ -65,6 +65,7 @@ "hybrid": [("translation.gctransformer", "framework")], "boehm": [("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], + "minimark": [("translation.gctransformer", "framework")], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", Modified: pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt ============================================================================== --- pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt (original) +++ pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt Thu Sep 23 16:53:32 2010 @@ -133,8 +133,8 @@ that doesn't change the state of an object, we don't follow its children recursively. -In practice we can encode the 4 states with a single extra bit in the -header: +In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode +the 4 states with a single extra bit in the header: ===== ============= ======== ==================== state is_forwarded? bit set? bit set in the copy? @@ -150,3 +150,17 @@ bit in the copy at the end, to clean up before the next collection (which means recursively bumping the state from 2 to 3 in the final loop). 
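
The paragraph added below explains that MiniMark uses two header flags instead of the
forwarding trick. As a side note, the four finalization-ordering states then become a simple
function of those flags; an illustrative sketch (the flag values are invented here, only the
yes/no combinations from the table matter):

    GCFLAG_VISITED = 0x01                    # invented values, for the sketch only
    GCFLAG_FINALIZATION_ORDERING = 0x02

    def finalization_state(hdr_flags):
        visited = bool(hdr_flags & GCFLAG_VISITED)
        ordering = bool(hdr_flags & GCFLAG_FINALIZATION_ORDERING)
        if not visited:
            return 1 if ordering else 0
        else:
            return 2 if ordering else 3
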
+ +In the MiniMark GC, the objects don't move (apart from when they are +copied out of the nursery), but we use the flag GCFLAG_VISITED to mark +objects that survive, so we can also have a single extra bit for +finalizers: + + ===== ============== ============================ + state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING + ===== ============== ============================ + 0 no no + 1 no yes + 2 yes yes + 3 yes no + ===== ============== ============================ Modified: pypy/branch/jitffi/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/jitffi/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/jitffi/pypy/interpreter/baseobjspace.py Thu Sep 23 16:53:32 2010 @@ -71,7 +71,8 @@ space.wrap("__class__ assignment: only for heap types")) def user_setup(self, space, w_subtype): - assert False, "only for interp-level user subclasses from typedef.py" + raise NotImplementedError("only for interp-level user subclasses " + "from typedef.py") def getname(self, space, default): try: Modified: pypy/branch/jitffi/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/cli/method.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/cli/method.py Thu Sep 23 16:53:32 2010 @@ -207,9 +207,9 @@ def _collect_types(self, operations, box2classes): for op in operations: - if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC): + if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): box = op.args[0] - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) box2classes.setdefault(box, []).append(descr.selfclass) if op in self.cliloop.guard2ops: @@ -335,7 +335,7 @@ while self.i < N: op = oplist[self.i] self.emit_debug(op.repr()) - func = self.operations[op.opnum] + func = self.operations[op.getopnum()] assert func is not None func(self, op) self.i += 1 @@ -357,10 +357,10 @@ assert op.is_guard() if op in self.cliloop.guard2ops: inputargs, suboperations = self.cliloop.guard2ops[op] - self.match_var_fox_boxes(op.fail_args, inputargs) + self.match_var_fox_boxes(op.getfailargs(), inputargs) self.emit_operations(suboperations) else: - self.emit_return_failed_op(op, op.fail_args) + self.emit_return_failed_op(op, op.getfailargs()) def emit_end(self): assert self.branches == [] @@ -410,7 +410,7 @@ def emit_ovf_op(self, op, emit_op): next_op = self.oplist[self.i+1] - if next_op.opnum == rop.GUARD_NO_OVERFLOW: + if next_op.getopnum() == rop.GUARD_NO_OVERFLOW: self.i += 1 self.emit_ovf_op_and_guard(op, next_op, emit_op) return @@ -544,7 +544,7 @@ self.emit_guard_overflow_impl(op, OpCodes.Brfalse) def emit_op_jump(self, op): - target_token = op.descr + target_token = op.getdescr() assert isinstance(target_token, LoopToken) if target_token.cliloop is self.cliloop: # jump to the beginning of the loop @@ -586,7 +586,7 @@ self.store_result(op) def emit_op_instanceof(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_clitype() op.args[0].load(self) @@ -604,7 +604,7 @@ self.store_result(op) def emit_op_call_impl(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.StaticMethDescr) delegate_type = descr.get_delegate_clitype() meth_invoke = descr.get_meth_info() @@ -619,7 +619,7 @@ emit_op_call_pure = emit_op_call def emit_op_oosend(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, 
runner.MethDescr) clitype = descr.get_self_clitype() methinfo = descr.get_meth_info() @@ -639,7 +639,7 @@ self.store_result(op) def emit_op_getfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -653,7 +653,7 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_setfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -665,7 +665,7 @@ self.il.Emit(OpCodes.Stfld, fieldinfo) def emit_op_getarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -678,7 +678,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_setarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -689,7 +689,7 @@ self.il.Emit(OpCodes.Stelem, itemtype) def emit_op_arraylen_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() op.args[0].load(self) @@ -698,7 +698,7 @@ self.store_result(op) def emit_op_new_array(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) item_clitype = descr.get_clitype() if item_clitype is None: Modified: pypy/branch/jitffi/pypy/jit/backend/cli/runner.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/cli/runner.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/cli/runner.py Thu Sep 23 16:53:32 2010 @@ -105,7 +105,7 @@ def _attach_token_to_faildescrs(self, token, operations): for op in operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) descr._loop_token = token descr._guard_op = op @@ -136,7 +136,7 @@ func = cliloop.funcbox.holder.GetFunc() func(self.get_inputargs()) op = self.failing_ops[self.inputargs.get_failed_op()] - return op.descr + return op.getdescr() def set_future_value_int(self, index, intvalue): self.get_inputargs().set_int(index, intvalue) Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py Thu Sep 23 16:53:32 2010 @@ -151,16 +151,17 @@ def _compile_operations(self, c, operations, var2index): for op in operations: - llimpl.compile_add(c, op.opnum) - descr = op.descr + llimpl.compile_add(c, op.getopnum()) + descr = op.getdescr() if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) - if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP: + if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython - c._obj.externalobj.operations[-1].descr = descr - for x in op.args: + c._obj.externalobj.operations[-1].setdescr(descr) + for i in range(op.numargs()): + x = op.getarg(i) if isinstance(x, history.Box): llimpl.compile_add_var(c, var2index[x]) elif isinstance(x, history.ConstInt): @@ -173,10 +174,10 @@ raise Exception("'%s' 
args contain: %r" % (op.getopname(), x)) if op.is_guard(): - faildescr = op.descr + faildescr = op.getdescr() assert isinstance(faildescr, history.AbstractFailDescr) faildescr._fail_args_types = [] - for box in op.fail_args: + for box in op.getfailargs(): if box is None: type = history.HOLE else: @@ -185,7 +186,7 @@ fail_index = self.get_fail_descr_number(faildescr) index = llimpl.compile_add_fail(c, fail_index) faildescr._compiled_fail = c, index - for box in op.fail_args: + for box in op.getfailargs(): if box is not None: llimpl.compile_add_fail_arg(c, var2index[box]) else: @@ -203,13 +204,13 @@ x)) op = operations[-1] assert op.is_final() - if op.opnum == rop.JUMP: - targettoken = op.descr + if op.getopnum() == rop.JUMP: + targettoken = op.getdescr() assert isinstance(targettoken, history.LoopToken) compiled_version = targettoken._llgraph_compiled_version llimpl.compile_add_jump_target(c, compiled_version) - elif op.opnum == rop.FINISH: - faildescr = op.descr + elif op.getopnum() == rop.FINISH: + faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) llimpl.compile_add_fail(c, index) else: @@ -280,7 +281,7 @@ def __init__(self, *args, **kwds): BaseCPU.__init__(self, *args, **kwds) self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr') - + def fielddescrof(self, S, fieldname): ofs, size = symbolic.get_field_token(S, fieldname) token = history.getkind(getattr(S, fieldname)) @@ -504,7 +505,7 @@ return ootype.cast_to_object(e) else: return ootype.NULL - + def get_exc_value(self): if llimpl._last_exception: earg = llimpl._last_exception.args[1] @@ -580,7 +581,7 @@ x = descr.callmeth(selfbox, argboxes) # XXX: return None if METH.RESULT is Void return x - + def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) @@ -612,7 +613,7 @@ class KeyManager(object): """ Helper class to convert arbitrary dictionary keys to integers. 
- """ + """ def __init__(self): self.keys = {} @@ -695,7 +696,7 @@ self.ARRAY = ARRAY = ootype.Array(TYPE) def create(): return boxresult(TYPE, ootype.new(TYPE)) - + def create_array(lengthbox): n = lengthbox.getint() return boxresult(ARRAY, ootype.oonewarray(ARRAY, n)) @@ -757,7 +758,7 @@ obj = objbox.getref(TYPE) value = unwrap(T, valuebox) setattr(obj, fieldname, value) - + self.getfield = getfield self.setfield = setfield self._is_pointer_field = (history.getkind(T) == 'ref') Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py Thu Sep 23 16:53:32 2010 @@ -41,9 +41,12 @@ moving_gc = False gcrootmap = None - def __init__(self, gcdescr, translator, rtyper): - GcLLDescription.__init__(self, gcdescr, translator, rtyper) - # grab a pointer to the Boehm 'malloc' function + @classmethod + def configure_boehm_once(cls): + """ Configure boehm only once, since we don't cache failures + """ + if hasattr(cls, 'malloc_fn_ptr'): + return cls.malloc_fn_ptr from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() @@ -59,13 +62,20 @@ GC_MALLOC = "GC_local_malloc" else: GC_MALLOC = "GC_malloc" - malloc_fn_ptr = rffi.llexternal(GC_MALLOC, [lltype.Signed], # size_t, but good enough llmemory.GCREF, compilation_info=compilation_info, sandboxsafe=True, _nowrapper=True) + cls.malloc_fn_ptr = malloc_fn_ptr + cls.compilation_info = compilation_info + return malloc_fn_ptr + + def __init__(self, gcdescr, translator, rtyper): + GcLLDescription.__init__(self, gcdescr, translator, rtyper) + # grab a pointer to the Boehm 'malloc' function + malloc_fn_ptr = self.configure_boehm_once() self.funcptr_for_new = malloc_fn_ptr # on some platform GC_init is required before any other @@ -73,7 +83,7 @@ # XXX move this to tests init_fn_ptr = rffi.llexternal("GC_init", [], lltype.Void, - compilation_info=compilation_info, + compilation_info=self.compilation_info, sandboxsafe=True, _nowrapper=True) @@ -559,12 +569,12 @@ # newops = [] for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: continue # ---------- replace ConstPtrs with GETFIELD_RAW ---------- # xxx some performance issue here - for i in range(len(op.args)): - v = op.args[i] + for i in range(op.numargs()): + v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): addr = self.gcrefs.get_address_of_gcref(v.value) # ^^^even for non-movable objects, to record their presence @@ -574,23 +584,21 @@ newops.append(ResOperation(rop.GETFIELD_RAW, [ConstInt(addr)], box, self.single_gcref_descr)) - op.args[i] = box + op.setarg(i, box) # ---------- write barrier for SETFIELD_GC ---------- - if op.opnum == rop.SETFIELD_GC: - v = op.args[1] + if op.getopnum() == rop.SETFIELD_GC: + v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETFIELD_RAW, op.args, None, - descr=op.descr) + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.opnum == rop.SETARRAYITEM_GC: - v = op.args[2] + if op.getopnum() == rop.SETARRAYITEM_GC: + v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - 
self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None, - descr=op.descr) + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- newops.append(op) del operations[:] Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py Thu Sep 23 16:53:32 2010 @@ -81,6 +81,10 @@ for v in vars: self.possibly_free_var(v) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + self.possibly_free_var(op.getarg(i)) + def _check_invariants(self): if not we_are_translated(): # make sure no duplicates Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 23 16:53:32 2010 @@ -258,18 +258,18 @@ gc_ll_descr._gen_write_barrier(newops, v_base, v_value) assert llop1.record == [] assert len(newops) == 1 - assert newops[0].opnum == rop.COND_CALL_GC_WB - assert newops[0].args[0] == v_base - assert newops[0].args[1] == v_value + assert newops[0].getopnum() == rop.COND_CALL_GC_WB + assert newops[0].getarg(0) == v_base + assert newops[0].getarg(1) == v_value assert newops[0].result is None - wbdescr = newops[0].descr + wbdescr = newops[0].getdescr() assert isinstance(wbdescr.jit_wb_if_flag, int) assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) def test_get_rid_of_debug_merge_point(self): operations = [ - ResOperation(rop.DEBUG_MERGE_POINT, [], None), + ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None), ] gc_ll_descr = self.gc_ll_descr gc_ll_descr.rewrite_assembler(None, operations) @@ -298,13 +298,14 @@ gc_ll_descr.gcrefs = MyFakeGCRefList() gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations) assert len(operations) == 2 - assert operations[0].opnum == rop.GETFIELD_RAW - assert operations[0].args == [ConstInt(43)] - assert operations[0].descr == gc_ll_descr.single_gcref_descr + assert operations[0].getopnum() == rop.GETFIELD_RAW + assert operations[0].getarg(0) == ConstInt(43) + assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) - assert operations[1].opnum == rop.PTR_EQ - assert operations[1].args == [v_random_box, v_box] + assert operations[1].getopnum() == rop.PTR_EQ + assert operations[1].getarg(0) == v_random_box + assert operations[1].getarg(1) == v_box assert operations[1].result == v_result def test_rewrite_assembler_1_cannot_move(self): @@ -336,8 +337,9 @@ finally: rgc.can_move = old_can_move assert len(operations) == 1 - assert operations[0].opnum == rop.PTR_EQ - assert operations[0].args == [v_random_box, ConstPtr(s_gcref)] + assert operations[0].getopnum() == rop.PTR_EQ + assert operations[0].getarg(0) == v_random_box + assert operations[0].getarg(1) == ConstPtr(s_gcref) assert operations[0].result == v_result # check that s_gcref gets added to the list anyway, to make sure # that the GC sees it @@ -356,14 +358,15 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert 
operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETFIELD_RAW - assert operations[1].args == [v_base, v_value] - assert operations[1].descr == field_descr + assert operations[1].getopnum() == rop.SETFIELD_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_value + assert operations[1].getdescr() == field_descr def test_rewrite_assembler_3(self): # check write barriers before SETARRAYITEM_GC @@ -379,11 +382,13 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base + assert operations[0].getarg(1) == v_value assert operations[0].result is None # - assert operations[1].opnum == rop.SETARRAYITEM_RAW - assert operations[1].args == [v_base, v_index, v_value] - assert operations[1].descr == array_descr + assert operations[1].getopnum() == rop.SETARRAYITEM_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_index + assert operations[1].getarg(2) == v_value + assert operations[1].getdescr() == array_descr Modified: pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py Thu Sep 23 16:53:32 2010 @@ -107,7 +107,7 @@ # store away the exception into self.backup_exc_xxx, *unless* the # branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION. if exc: - opnum = operations[0].opnum + opnum = operations[0].getopnum() if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): self._store_away_exception() # Normal handling of the operations follows. 
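
Nearly every hunk in this merge is the resoperation refactoring named in the log message:
backend and optimizer code stops reading op.opnum, op.args, op.descr and op.fail_args directly
and goes through accessor methods instead. A simplified stand-in for that interface (a sketch,
not the real class in pypy/jit/metainterp/resoperation.py):

    class SketchResOp(object):
        """Illustration only; mirrors the accessors used throughout the diffs."""
        def __init__(self, opnum, args, result, descr=None):
            self._opnum = opnum
            self._args = list(args)
            self.result = result
            self._descr = descr
            self._fail_args = None
        def getopnum(self):          return self._opnum
        def numargs(self):           return len(self._args)
        def getarg(self, i):         return self._args[i]
        def setarg(self, i, box):    self._args[i] = box
        def getarglist(self):        return list(self._args)
        def getdescr(self):          return self._descr
        def setdescr(self, descr):   self._descr = descr
        def getfailargs(self):       return self._fail_args     # guards only
        def setfailargs(self, lst):  self._fail_args = lst      # guards only

    # so argument loops become:  for i in range(op.numargs()): box = op.getarg(i)
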
@@ -115,7 +115,7 @@ self._generate_op(op) def _generate_op(self, op): - opnum = op.opnum + opnum = op.getopnum() for i, name in all_operations: if opnum == i: meth = getattr(self, name) @@ -475,7 +475,7 @@ return location def generate_GETFIELD_GC(self, op): - loc = self._generate_field_gep(op.args[0], op.descr) + loc = self._generate_field_gep(op.args[0], op.getdescr()) self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") generate_GETFIELD_GC_PURE = generate_GETFIELD_GC @@ -483,7 +483,7 @@ generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC def generate_SETFIELD_GC(self, op): - fielddescr = op.descr + fielddescr = op.getdescr() loc = self._generate_field_gep(op.args[0], fielddescr) assert isinstance(fielddescr, FieldDescr) getarg = self.cpu.getarg_by_index[fielddescr.size_index] @@ -491,7 +491,7 @@ llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "") def generate_CALL(self, op): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr) v = op.args[0] @@ -579,7 +579,7 @@ self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") def generate_ARRAYLEN_GC(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_len(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_length) @@ -598,7 +598,7 @@ return location def _generate_array_gep(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) location = self._generate_gep(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_array) @@ -612,7 +612,7 @@ def generate_SETARRAYITEM_GC(self, op): loc = self._generate_array_gep(op) - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index] value_ref = getarg(self, op.args[2]) @@ -660,7 +660,7 @@ return res def generate_NEW(self, op): - sizedescr = op.descr + sizedescr = op.getdescr() assert isinstance(sizedescr, SizeDescr) res = self._generate_new(self.cpu._make_const_int(sizedescr.size)) self.vars[op.result] = res @@ -695,7 +695,7 @@ self.vars[op.result] = res def generate_NEW_ARRAY(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_new_array(op, arraydescr.ty_array_ptr, self.cpu._make_const_int(arraydescr.itemsize), Modified: pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Thu Sep 23 16:53:32 2010 @@ -1,5 +1,6 @@ import py, sys, random, os, struct, operator from pypy.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, LoopToken, @@ -39,7 +40,7 @@ else: raise NotImplementedError(box) res = self.cpu.execute_token(looptoken) - if res is operations[-1].descr: + if res is operations[-1].getdescr(): self.guard_failed = False else: self.guard_failed = True @@ -74,10 +75,11 @@ ResOperation(rop.FINISH, results, None, descr=BasicFailDescr(0))] if operations[0].is_guard(): - operations[0].fail_args = [] + operations[0].setfailargs([]) if not descr: descr = BasicFailDescr(1) - operations[0].descr = descr + if descr is not None: + operations[0].setdescr(descr) inputargs = [] for box in valueboxes: if isinstance(box, Box) and box not in 
inputargs: @@ -116,7 +118,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -137,7 +139,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, None, i1, None] + operations[2].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -160,7 +162,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -184,7 +186,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -194,7 +196,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -218,7 +220,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -228,7 +230,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -251,7 +253,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -317,7 +319,7 @@ descr=BasicFailDescr()), ResOperation(rop.JUMP, [z, t], None, descr=looptoken), ] - operations[-2].fail_args = [t, z] + operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 10) @@ -363,7 +365,7 @@ ResOperation(rop.FINISH, [v_res], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [] + ops[1].setfailargs([]) else: v_exc = self.cpu.ts.BoxRef() ops = [ @@ -372,7 +374,7 @@ descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [v_res] + ops[1].setfailargs([v_res]) # looptoken = LoopToken() self.cpu.compile_loop([v1, v2], ops, looptoken) @@ -909,8 +911,8 @@ ResOperation(rop.GUARD_TRUE, [i2], None), ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), ] - operations[2].fail_args = inputargs[:] - operations[2].descr = faildescr + operations[2].setfailargs(inputargs[:]) + operations[2].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -975,7 +977,7 @@ ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] - operations[-2].fail_args = fboxes + operations[-2].setfailargs(fboxes) looptoken = LoopToken() self.cpu.compile_loop(fboxes, operations, looptoken) @@ -1098,7 +1100,7 @@ descr=BasicFailDescr(4)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] - 
operations[1].fail_args = [] + operations[1].setfailargs([]) looptoken = LoopToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) @@ -1412,7 +1414,7 @@ FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr: + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 @@ -1462,7 +1464,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i0] + ops[2].setfailargs([i1, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1506,7 +1508,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i2, i0] + ops[2].setfailargs([i1, i2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1551,7 +1553,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, f2, i0] + ops[2].setfailargs([i1, f2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1824,7 +1826,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) Modified: pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py Thu Sep 23 16:53:32 2010 @@ -464,7 +464,7 @@ self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 5. Non raising-call and GUARD_EXCEPTION @@ -486,7 +486,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) op._exc_box = None builder.should_fail_by = op builder.guard_op = op @@ -507,7 +507,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 4. 
raising call and guard_no_exception @@ -524,7 +524,7 @@ op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) @@ -548,7 +548,7 @@ op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) Modified: pypy/branch/jitffi/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/test/test_random.py Thu Sep 23 16:53:32 2010 @@ -86,7 +86,7 @@ def process_operation(self, s, op, names, subops): args = [] - for v in op.args: + for v in op.getarglist(): if v in names: args.append(names[v]) ## elif isinstance(v, ConstAddr): @@ -105,11 +105,11 @@ args.append('ConstInt(%d)' % v.value) else: raise NotImplementedError(v) - if op.descr is None: + if op.getdescr() is None: descrstr = '' else: try: - descrstr = ', ' + op.descr._random_info + descrstr = ', ' + op.getdescr()._random_info except AttributeError: descrstr = ', descr=...' print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % ( @@ -129,7 +129,7 @@ def print_loop_prebuilt(ops): for op in ops: - for arg in op.args: + for arg in op.getarglist(): if isinstance(arg, ConstPtr): if arg not in names: writevar(arg, 'const_ptr') @@ -191,7 +191,7 @@ if self.should_fail_by is None: fail_args = self.loop.operations[-1].args else: - fail_args = self.should_fail_by.fail_args + fail_args = self.should_fail_by.getfailargs() for i, v in enumerate(fail_args): if isinstance(v, (BoxFloat, ConstFloat)): print >>s, (' assert cpu.get_latest_value_float(%d) == %r' @@ -284,8 +284,8 @@ builder.intvars[:] = original_intvars else: op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - op.descr = BasicFailDescr() - op.fail_args = fail_subset + op.setdescr(BasicFailDescr()) + op.setfailargs(fail_subset) builder.loop.operations.append(op) class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation): @@ -345,8 +345,8 @@ def produce_into(self, builder, r): op, passing = self.gen_guard(builder, r) builder.loop.operations.append(op) - op.descr = BasicFailDescr() - op.fail_args = builder.subset_of_intvars(r) + op.setdescr(BasicFailDescr()) + op.setfailargs(builder.subset_of_intvars(r)) if not passing: builder.should_fail_by = op builder.guard_op = op @@ -553,7 +553,7 @@ endvars = [] used_later = {} for op in loop.operations: - for v in op.args: + for v in op.getarglist(): used_later[v] = True for v in startvars: if v not in used_later: @@ -577,11 +577,11 @@ def get_fail_args(self): if self.should_fail_by.is_guard(): - assert self.should_fail_by.fail_args is not None - return self.should_fail_by.fail_args + assert self.should_fail_by.getfailargs() is not None + return self.should_fail_by.getfailargs() else: - assert self.should_fail_by.opnum == rop.FINISH - return self.should_fail_by.args + assert self.should_fail_by.getopnum() == rop.FINISH + return self.should_fail_by.getarglist() def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: @@ -606,7 +606,7 @@ else: raise 
NotImplementedError(box) fail = cpu.execute_token(self.loop.token) - assert fail is self.should_fail_by.descr + assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): value = cpu.get_latest_value_float(i) @@ -620,7 +620,7 @@ exc = cpu.grab_exc_value() if (self.guard_op is not None and self.guard_op.is_guard_exception()): - if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION: + if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: assert exc else: assert not exc @@ -633,26 +633,26 @@ else: op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box], BoxPtr()) - op.descr = BasicFailDescr() - op.fail_args = [] + op.setdescr(BasicFailDescr()) + op.setfailargs([]) return op if self.dont_generate_more: return False r = self.r guard_op = self.guard_op - fail_args = guard_op.fail_args - fail_descr = guard_op.descr + fail_args = guard_op.getfailargs() + fail_descr = guard_op.getdescr() op = self.should_fail_by - if not op.fail_args: + if not op.getfailargs(): return False # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) if guard_op.is_guard_exception(): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, - op.fail_args[:]) - self.generate_ops(bridge_builder, r, subloop, op.fail_args[:]) + op.getfailargs()[:]) + self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:]) # note that 'self.guard_op' now points to the guard that will fail in # this new bridge, while 'guard_op' still points to the guard that # has just failed. Modified: pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py Thu Sep 23 16:53:32 2010 @@ -1,16 +1,17 @@ import sys, os from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat -from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT,\ - LoopToken +from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, + LoopToken) from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.tool.uid import fixid -from pypy.jit.backend.x86.regalloc import RegAlloc, \ - X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs +from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, + X86XMMRegisterManager, get_ebp_ofs) -from pypy.jit.backend.x86.arch import FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, + IS_X86_32, IS_X86_64) from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, edi, @@ -389,8 +390,8 @@ def _find_debug_merge_point(self, operations): for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: - funcname = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + funcname = op.getarg(0)._get_str() break else: funcname = "" % len(self.loop_run_counters) @@ -418,7 +419,6 @@ mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) mc.JMP_r(X86_64_SCRATCH_REG.value) - mc.valgrind_invalidated() mc.done() def _inject_debugging_code(self, operations): @@ -681,25 +681,25 @@ self.mc.POP(loc) def regalloc_perform(self, op, arglocs, resloc): - 
genop_list[op.opnum](self, op, arglocs, resloc) + genop_list[op.getopnum()](self, op, arglocs, resloc) def regalloc_perform_discard(self, op, arglocs): - genop_discard_list[op.opnum](self, op, arglocs) + genop_discard_list[op.getopnum()](self, op, arglocs) def regalloc_perform_with_guard(self, op, guard_op, faillocs, arglocs, resloc, current_depths): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths - failargs = guard_op.fail_args - guard_opnum = guard_op.opnum + failargs = guard_op.getfailargs() + guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, faildescr, failargs, faillocs) if op is None: dispatch_opnum = guard_opnum else: - dispatch_opnum = op.opnum + dispatch_opnum = op.getopnum() res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token, arglocs, resloc) faildescr._x86_adr_jump_offset = res @@ -725,7 +725,7 @@ def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): rl = result_loc.lowest8bits() - if isinstance(op.args[0], Const): + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value) else: @@ -755,8 +755,8 @@ def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum - if isinstance(op.args[0], Const): + guard_opnum = guard_op.getopnum() + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: return self.implement_guard(guard_token, rev_cond) @@ -773,7 +773,7 @@ def _cmpop_guard_float(cond, false_cond, need_jp): def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -942,7 +942,7 @@ genop_guard_float_ge = _cmpop_guard_float("AE", "B", False) def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -970,7 +970,7 @@ self.mc.CVTSI2SD(resloc, arglocs[0]) def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'Z') @@ -984,7 +984,7 @@ self.mc.MOVZX8(resloc, rl) def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'NZ') @@ -1120,7 +1120,7 @@ assert isinstance(baseofs, ImmedLoc) assert isinstance(scale_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value) - if op.args[2].type == FLOAT: + if op.getarg(2).type == FLOAT: self.mc.MOVSD(dest_addr, value_loc) else: if IS_X86_64 and scale_loc.value == 3: @@ -1216,7 +1216,7 @@ return addr def _gen_guard_overflow(self, guard_op, guard_token): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() if guard_opnum == rop.GUARD_NO_OVERFLOW: return 
self.implement_guard(guard_token, 'O') elif guard_opnum == rop.GUARD_OVERFLOW: @@ -1244,8 +1244,8 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - if guard_op.args[0].type == FLOAT: - assert guard_op.args[1].type == FLOAT + if guard_op.getarg(0).type == FLOAT: + assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: self.mc.CMP(locs[0], locs[1]) @@ -1636,8 +1636,8 @@ assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value - if isinstance(op.args[0], Const): - x = imm(op.args[0].getint()) + if isinstance(op.getarg(0), Const): + x = imm(op.getarg(0).getint()) else: x = arglocs[1] if x is eax: @@ -1656,7 +1656,7 @@ def genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) self.genop_call(op, arglocs, result_loc) @@ -1665,10 +1665,10 @@ def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # @@ -1753,7 +1753,7 @@ def genop_discard_cond_call_gc_wb(self, op, arglocs): # use 'mc._mc' directly instead of 'mc', to avoid # bad surprizes if the code buffer is mostly full - descr = op.descr + descr = op.getdescr() if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) Modified: pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py Thu Sep 23 16:53:32 2010 @@ -29,6 +29,9 @@ self._pos = 0 def overwrite(self, pos, listofchars): + """ Overwrite a specified position with a given list of chars + (the position is relative to the start of the buffer) + """ make_sure_not_resized(listofchars) assert pos + len(listofchars) <= self._size for c in listofchars: @@ -49,35 +52,38 @@ self.writechar(chr(n)) def get_relative_pos(self): + """ Current position, relative to code start + """ return self._pos def tell(self): + """ Return the current absolute address in the machine code block + """ baseaddr = rffi.cast(lltype.Signed, self._data) return baseaddr + self._pos - def seekback(self, count): - pos = self._pos - count - self._pos = pos - self._last_dump_start = pos - def done(self): - # normally, no special action is needed here + """ Called at the end of writing each piece of machine code.
+ Even though this function doesn't do much, it is essential to call + it so that tools such as valgrind and the machine code dumper keep + working. + """ + self.valgrind_invalidated() if machine_code_dumper.enabled: machine_code_dumper.dump_range(self, self._last_dump_start, self._pos) self._last_dump_start = self._pos - def redone(self, frm, to): - if machine_code_dumper.enabled: - baseaddr = rffi.cast(lltype.Signed, self._data) - machine_code_dumper.dump_range(self, frm - baseaddr, to - baseaddr) - def log(self, msg): + """ Insert information into the machine code dumper, if enabled + """ if machine_code_dumper.enabled: machine_code_dumper.dump(self, 'LOG', self._pos, msg) def valgrind_invalidated(self): - # mark the range of the InMemoryCodeBuilder as invalidated for Valgrind + """ Mark the range of the InMemoryCodeBuilder as invalidated + for Valgrind + """ from pypy.jit.backend.x86 import valgrind valgrind.discard_translations(self._data, self._size) Modified: pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py Thu Sep 23 16:53:32 2010 @@ -234,6 +234,12 @@ else: self.rm.possibly_free_var(var) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + var = op.getarg(i) + if var is not None: # xxx kludgy + self.possibly_free_var(var) + def possibly_free_vars(self, vars): for var in vars: if var is not None: # xxx kludgy @@ -262,12 +268,12 @@ selected_reg, need_lower_byte) def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.opnum != rop.JUMP or jump.descr is not looptoken: + if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: loop_consts = {} else: loop_consts = {} for i in range(len(inputargs)): - if inputargs[i] is jump.args[i]: + if inputargs[i] is jump.getarg(i): loop_consts[inputargs[i]] = i return loop_consts @@ -312,7 +318,7 @@ self.assembler.regalloc_perform(op, arglocs, result_loc) def locs_for_fail(self, guard_op): - return [self.loc(v) for v in guard_op.fail_args] + return [self.loc(v) for v in guard_op.getfailargs()] def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -324,7 +330,7 @@ current_depths) if op.result is not None: self.possibly_free_var(op.result) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def perform_guard(self, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -338,7 +344,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -346,24 +352,24 @@ self.assembler.regalloc_perform_discard(op, arglocs) def can_merge_with_next_guard(self, op, i, operations): - if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER: - assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED + if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: + assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if not op.is_comparison(): if op.is_ovf(): - if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and - operations[i + 1].opnum != rop.GUARD_OVERFLOW): + if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and + operations[i + 1].getopnum() !=
rop.GUARD_OVERFLOW): print "int_xxx_ovf not followed by guard_(no)_overflow" raise AssertionError return True return False - if (operations[i + 1].opnum != rop.GUARD_TRUE and - operations[i + 1].opnum != rop.GUARD_FALSE): + if (operations[i + 1].getopnum() != rop.GUARD_TRUE and + operations[i + 1].getopnum() != rop.GUARD_FALSE): return False - if operations[i + 1].args[0] is not op.result: + if operations[i + 1].getarg(0) is not op.result: return False if (self.longevity[op.result][1] > i + 1 or - op.result in operations[i + 1].fail_args): + op.result in operations[i + 1].getfailargs()): return False return True @@ -376,13 +382,13 @@ self.xrm.position = i if op.has_no_side_effect() and op.result not in self.longevity: i += 1 - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) continue if self.can_merge_with_next_guard(op, i, operations): - oplist_with_guard[op.opnum](self, op, operations[i + 1]) + oplist_with_guard[op.getopnum()](self, op, operations[i + 1]) i += 1 else: - oplist[op.opnum](self, op) + oplist[op.getopnum()](self, op) if op.result is not None: self.possibly_free_var(op.result) self.rm._check_invariants() @@ -402,19 +408,20 @@ op = operations[i] if op.result is not None: start_live[op.result] = i - for arg in op.args: + for j in range(op.numargs()): + arg = op.getarg(j) if isinstance(arg, Box): if arg not in start_live: - print "Bogus arg in operation %d at %d" % (op.opnum, i) + print "Bogus arg in operation %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) if op.is_guard(): - for arg in op.fail_args: + for arg in op.getfailargs(): if arg is None: # hole continue assert isinstance(arg, Box) if arg not in start_live: - print "Bogus arg in guard %d at %d" % (op.opnum, i) + print "Bogus arg in guard %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) for arg in inputargs: @@ -432,9 +439,9 @@ return self.rm.loc(v) def _consider_guard(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) self.perform_guard(op, [loc], None) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) consider_guard_true = _consider_guard consider_guard_false = _consider_guard @@ -442,52 +449,54 @@ consider_guard_isnull = _consider_guard def consider_finish(self, op): - locs = [self.loc(v) for v in op.args] - locs_are_ref = [v.type == REF for v in op.args] - fail_index = self.assembler.cpu.get_fail_descr_number(op.descr) + locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] + locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())] + fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr()) self.assembler.generate_failure(fail_index, locs, self.exc, locs_are_ref) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) def consider_guard_exception(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() - loc1 = self.rm.force_allocate_reg(box, op.args) + args = op.getarglist() + loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used - resloc = self.rm.force_allocate_reg(op.result, op.args + [box]) + resloc = self.rm.force_allocate_reg(op.result, args + [box]) else: resloc = None self.perform_guard(op, [loc, loc1], resloc) - self.rm.possibly_free_vars(op.args) + 
self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(box) consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception def consider_guard_value(self, op): - x = self.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + x = self.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_class(self, op): - assert isinstance(op.args[0], Box) - x = self.rm.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + assert isinstance(op.getarg(0), Box) + x = self.rm.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_guard_nonnull_class = consider_guard_class def _consider_binop_part(self, op): - x = op.args[0] - argloc = self.loc(op.args[1]) - loc = self.rm.force_result_in_reg(op.result, x, op.args) - self.rm.possibly_free_var(op.args[1]) + x = op.getarg(0) + argloc = self.loc(op.getarg(1)) + args = op.getarglist() + loc = self.rm.force_result_in_reg(op.result, x, args) + self.rm.possibly_free_var(op.getarg(1)) return loc, argloc def _consider_binop(self, op): @@ -510,26 +519,27 @@ consider_int_add_ovf = _consider_binop_with_guard def consider_int_neg(self, op): - res = self.rm.force_result_in_reg(op.result, op.args[0]) + res = self.rm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [res], res) consider_int_invert = consider_int_neg def consider_int_lshift(self, op): - if isinstance(op.args[1], Const): - loc2 = self.rm.convert_to_imm(op.args[1]) + if isinstance(op.getarg(1), Const): + loc2 = self.rm.convert_to_imm(op.getarg(1)) else: - loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) - loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args) + loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) + args = op.getarglist() + loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc1, loc2], loc1) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_int_rshift = consider_int_lshift consider_uint_rshift = consider_int_lshift def _consider_int_div_or_mod(self, op, resultreg, trashreg): - l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax) - l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) + l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax) + l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg) # the register (eax or edx) not holding what we are looking for # will be just trash after that operation @@ -538,7 +548,7 @@ assert l0 is eax assert l1 is ecx assert l2 is resultreg - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(tmpvar) def consider_int_mod(self, op): @@ -552,17 +562,18 @@ consider_uint_floordiv = consider_int_floordiv def _consider_compop(self, op, guard_op): - vx = op.args[0] - vy = op.args[1] + vx = op.getarg(0) + vy = op.getarg(1) arglocs = [self.loc(vx), self.loc(vy)] if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or isinstance(vx, Const) or isinstance(vy, Const)): pass else: arglocs[0] = self.rm.make_sure_var_in_reg(vx) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + self.rm.possibly_free_vars(args) if guard_op is None: - loc = 
self.rm.force_allocate_reg(op.result, op.args, + loc = self.rm.force_allocate_reg(op.result, args, need_lower_byte=True) self.Perform(op, arglocs, loc) else: @@ -582,10 +593,11 @@ consider_ptr_ne = _consider_compop def _consider_float_op(self, op): - loc1 = self.xrm.loc(op.args[1]) - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args) + loc1 = self.xrm.loc(op.getarg(1)) + args = op.getarglist() + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc0, loc1], loc0) - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) consider_float_add = _consider_float_op consider_float_sub = _consider_float_op @@ -593,11 +605,12 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args, + args = op.getarglist() + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) - loc1 = self.xrm.loc(op.args[1]) + loc1 = self.xrm.loc(op.getarg(1)) arglocs = [loc0, loc1] - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) if guard_op is None: res = self.rm.force_allocate_reg(op.result, need_lower_byte=True) self.Perform(op, arglocs, res) @@ -612,26 +625,26 @@ consider_float_ge = _consider_float_cmp def consider_float_neg(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_float_abs(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_float_to_int(self, op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False) + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False) loc1 = self.rm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_int_to_float(self, op): - loc0 = self.rm.loc(op.args[0]) + loc0 = self.rm.loc(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None @@ -650,11 +663,11 @@ self.Perform(op, arglocs, resloc) def _consider_call(self, op, guard_not_forced_op=None): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, BaseCallDescr) - assert len(calldescr.arg_classes) == len(op.args) - 1 + assert len(calldescr.arg_classes) == op.numargs() - 1 size = calldescr.get_result_size(self.translate_support_code) - self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args], + self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_not_forced_op) def consider_call(self, op): @@ -665,28 +678,29 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: 
- self.rm._sync_var(op.args[vable_index]) - vable = self.fm.loc(op.args[vable_index]) + self.rm._sync_var(op.getarg(vable_index)) + vable = self.fm.loc(op.getarg(vable_index)) else: vable = imm(0) self._call(op, [imm(size), vable] + - [self.loc(arg) for arg in op.args], + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args) + args = op.getarglist() + loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args) # ^^^ we force loc_newvalue in a reg (unless it's a Const), # because it will be needed anyway by the following setfield_gc. # It avoids loading it twice from the memory. - loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, + loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) arglocs = [loc_base, loc_newvalue] # add eax, ecx and edx as extra "arguments" to ensure they are @@ -700,7 +714,7 @@ and self.rm.stays_alive(v)): arglocs.append(reg) self.PerformDiscard(op, arglocs) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) @@ -725,15 +739,15 @@ def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.descr): - self._fastpath_malloc(op, op.descr) + if gc_ll_descr.can_inline_malloc(op.getdescr()): + self._fastpath_malloc(op, op.getdescr()) else: - args = gc_ll_descr.args_for_new(op.descr) + args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] return self._call(op, arglocs) def consider_new_with_vtable(self, op): - classint = op.args[0].getint() + classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): self._fastpath_malloc(op, descrsize) @@ -742,34 +756,34 @@ else: args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) def consider_newstr(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newstr is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code) assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, 0, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0), op.result) def consider_newunicode(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newunicode is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) if itemsize == 4: - return self._malloc_varsize(ofs_items, ofs, 2, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0), op.result) elif itemsize == 2: - return self._malloc_varsize(ofs_items, ofs, 1, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0), op.result) else: assert False, itemsize @@ -784,7 +798,7 @@ else: tempbox = None other_loc = imm(ofs_items + (v.getint() << scale)) - 
self._call(ResOperation(rop.NEW, [v], res_v), + self._call(ResOperation(rop.NEW, [], res_v), [other_loc], [v]) loc = self.rm.make_sure_var_in_reg(v, [res_v]) assert self.loc(res_v) == eax @@ -792,22 +806,22 @@ self.rm.possibly_free_var(v) if tempbox is not None: self.rm.possibly_free_var(tempbox) - self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None), + self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None), [eax, imm(ofs_length), imm(WORD), loc]) def consider_new_array(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr) + args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) # boehm GC (XXX kill the following code at some point) scale_of_field, basesize, ofs_length, _ = ( - self._unpack_arraydescr(op.descr)) + self._unpack_arraydescr(op.getdescr())) return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.args[0], op.result) + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -829,50 +843,54 @@ return imm(ofs), imm(size), ptr def consider_setfield_gc(self, op): - ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr) + ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True else: need_lower_byte = False - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - value_loc = self.make_sure_var_in_reg(op.args[1], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + value_loc = self.make_sure_var_in_reg(op.getarg(1), args, need_lower_byte=need_lower_byte) - self.possibly_free_vars(op.args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc]) consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=True) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc]) consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: need_lower_byte = True else: need_lower_byte = False - value_loc = self.make_sure_var_in_reg(op.args[2], op.args, + value_loc = self.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=need_lower_byte) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.possibly_free_vars(op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc, imm(scale), imm(ofs)]) 
consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars(args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc) @@ -881,10 +899,11 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc) @@ -893,8 +912,8 @@ def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register - argloc = self.loc(op.args[0]) - self.rm.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) if guard_op is not None: self.perform_with_guard(op, guard_op, [argloc], None) else: @@ -904,33 +923,36 @@ consider_int_is_zero = consider_int_is_true def consider_same_as(self, op): - argloc = self.loc(op.args[0]) - self.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.possibly_free_var(op.getarg(0)) resloc = self.force_allocate_reg(op.result) self.Perform(op, [argloc], resloc) #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc], result_loc) consider_unicodelen = consider_strlen def consider_arraylen_gc(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc], result_loc) @@ -939,7 +961,7 @@ def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) 
self.jump_target_descr = descr nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) @@ -951,17 +973,20 @@ xmmtmp = X86XMMRegisterManager.all_regs[0] xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT] + # XXX we don't need a copy, we only just the original list + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] assert tmploc not in nonfloatlocs dst_locations = [loc for loc in nonfloatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, tmploc) # Part about floats - src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT] + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type == FLOAT] dst_locations = [loc for loc in floatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp) self.rm.possibly_free_var(box) self.xrm.possibly_free_var(box1) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) def consider_debug_merge_point(self, op): @@ -1002,12 +1027,21 @@ def add_none_argument(fn): return lambda self, op: fn(self, op, None) +def is_comparison_or_ovf_op(opnum): + from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp + cls = opclasses[opnum] + # hack hack: in theory they are instance method, but they don't use + # any instance field, we can use a fake object + class Fake(cls): + pass + op = Fake(None) + return op.is_comparison() or op.is_ovf() + for name, value in RegAlloc.__dict__.iteritems(): if name.startswith('consider_'): name = name[len('consider_'):] num = getattr(rop, name.upper()) - if (ResOperation(num, [], None).is_comparison() - or ResOperation(num, [], None).is_ovf() + if (is_comparison_or_ovf_op(num) or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py Thu Sep 23 16:53:32 2010 @@ -47,7 +47,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].descr + descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? @@ -114,8 +114,8 @@ assert loop.token._x86_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? 
if IS_X86_32: - assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.descr._x86_bridge_param_depth == 0 + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py Thu Sep 23 16:53:32 2010 @@ -9,7 +9,7 @@ from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\ - FloatConstants + FloatConstants, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -17,6 +17,11 @@ from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.backend.x86.rx86 import * +def test_is_comparison_or_ovf_op(): + assert not is_comparison_or_ovf_op(rop.INT_ADD) + assert is_comparison_or_ovf_op(rop.INT_ADD_OVF) + assert is_comparison_or_ovf_op(rop.INT_EQ) + CPU = getcpuclass() class MockGcDescr(GcCache): def get_funcptr_for_new(self): @@ -159,8 +164,8 @@ assert guard_op.is_guard() bridge = self.parse(ops, **kwds) assert ([box.type for box in bridge.inputargs] == - [box.type for box in guard_op.fail_args]) - faildescr = guard_op.descr + [box.type for box in guard_op.getfailargs()]) + faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations) return bridge @@ -607,7 +612,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) @@ -630,7 +635,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py Thu Sep 23 16:53:32 2010 @@ -265,7 +265,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) looptoken = LoopToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: @@ -314,7 +314,7 @@ ResOperation(rop.FINISH, [ConstInt(0)], None, descr=BasicFailDescr()), ] - ops[-2].fail_args = [i1] + ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = LoopToken() self.cpu.compile_loop(inputargs, ops, looptoken) @@ -353,7 +353,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[3].fail_args = [i1] + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) 
name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 0: hello" @@ -368,7 +368,7 @@ ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) name, address, size = agent.functions[1] @@ -462,7 +462,7 @@ cmp_result = BoxInt() ops.append(ResOperation(float_op, args, cmp_result)) ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr())) - ops[-1].fail_args = [failed] + ops[-1].setfailargs([failed]) ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr())) Modified: pypy/branch/jitffi/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/compile.py Thu Sep 23 16:53:32 2010 @@ -51,7 +51,7 @@ def compile_new_loop(metainterp, old_loop_tokens, greenkey, start): """Try to compile a new loop by closing the current history back to the first operation. - """ + """ history = metainterp.history loop = create_empty_loop(metainterp) loop.greenkey = greenkey @@ -65,7 +65,7 @@ jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token - loop.operations[-1].descr = loop_token # patch the target of the JUMP + loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) @@ -133,7 +133,7 @@ metainterp_sd.profiler.end_backend() if not we_are_translated(): metainterp_sd.stats.compiled() - metainterp_sd.log("compiled new bridge") + metainterp_sd.log("compiled new bridge") # ____________________________________________________________ @@ -177,7 +177,7 @@ class TerminatingLoopToken(LoopToken): terminating = True - + def __init__(self, nargs, finishdescr): self.specnodes = [prebuiltNotSpecNode]*nargs self.finishdescr = finishdescr @@ -233,14 +233,14 @@ self.metainterp_sd = metainterp_sd def store_final_boxes(self, guard_op, boxes): - guard_op.fail_args = boxes - self.guard_opnum = guard_op.opnum + guard_op.setfailargs(boxes) + self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): - assert guard_value_op.opnum == rop.GUARD_VALUE - box = guard_value_op.args[0] + assert guard_value_op.getopnum() == rop.GUARD_VALUE + box = guard_value_op.getarg(0) try: - i = guard_value_op.fail_args.index(box) + i = guard_value_op.getfailargs().index(box) except ValueError: return # xxx probably very rare else: @@ -508,7 +508,7 @@ def compile_new_bridge(metainterp, old_loop_tokens, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. - """ + """ # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. # @@ -540,13 +540,14 @@ op = new_loop.operations[-1] if not isinstance(target_loop_token, TerminatingLoopToken): # normal case - op.descr = target_loop_token # patch the jump target + op.setdescr(target_loop_token) # patch the jump target else: # The target_loop_token is a pseudo loop token, # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. 
a FINISH descr = target_loop_token.finishdescr - new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) + args = op.getarglist() + new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op # ____________________________________________________________ @@ -597,6 +598,6 @@ ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) ] - operations[1].fail_args = [] + operations[1].setfailargs([]) cpu.compile_loop(inputargs, operations, loop_token) return loop_token Modified: pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py Thu Sep 23 16:53:32 2010 @@ -17,13 +17,13 @@ for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): - graphs.append((SubGraph(op.descr._debug_suboperations), + graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) graphpage = ResOpGraphPage(graphs, errmsg) graphpage.display() def is_interesting_guard(op): - return hasattr(op.descr, '_debug_suboperations') + return hasattr(op.getdescr(), '_debug_suboperations') class ResOpGraphPage(GraphPage): @@ -76,7 +76,7 @@ for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: if not last_was_mergepoint: last_was_mergepoint = True self.mark_starter(graphindex, i) @@ -155,7 +155,7 @@ op = operations[opindex] lines.append(repr(op)) if is_interesting_guard(op): - tgt = op.descr._debug_suboperations[0] + tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] self.genedge((graphindex, opstartindex), (tgt_g, tgt_i), @@ -167,8 +167,8 @@ self.genedge((graphindex, opstartindex), (graphindex, opindex)) break - if op.opnum == rop.JUMP: - tgt = op.descr + if op.getopnum() == rop.JUMP: + tgt = op.getdescr() tgt_g = -1 if tgt is None: tgt_g = graphindex @@ -191,7 +191,9 @@ def getlinks(self): boxes = {} for op in self.all_operations: - for box in op.args + [op.result]: + args = op.getarglist() + args.append(op.result) + for box in args: if getattr(box, 'is_box', False): boxes[box] = True links = {} Modified: pypy/branch/jitffi/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/history.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/history.py Thu Sep 23 16:53:32 2010 @@ -532,7 +532,7 @@ class BoxFloat(Box): type = FLOAT _attrs_ = ('value',) - + def __init__(self, floatval=0.0): assert isinstance(floatval, float) self.value = floatval @@ -759,33 +759,34 @@ assert len(seen) == len(inputargs), ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) - + @staticmethod def check_consistency_of_branch(operations, seen): "NOT_RPYTHON" for op in operations: - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) if isinstance(box, Box): assert box in seen if op.is_guard(): - assert op.descr is not None - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + assert op.getdescr() is not None + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations 
TreeLoop.check_consistency_of_branch(ops, seen.copy()) - for box in op.fail_args or []: + for box in op.getfailargs() or []: if box is not None: assert isinstance(box, Box) assert box in seen else: - assert op.fail_args is None + assert op.getfailargs() is None box = op.result if box is not None: assert isinstance(box, Box) assert box not in seen seen[box] = True assert operations[-1].is_final() - if operations[-1].opnum == rop.JUMP: - target = operations[-1].descr + if operations[-1].getopnum() == rop.JUMP: + target = operations[-1].getdescr() if target is not None: assert isinstance(target, LoopToken) @@ -793,7 +794,8 @@ # RPython-friendly print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: - print '\t', op.getopname(), self._dump_args(op.args), \ + args = op.getarglist() + print '\t', op.getopname(), self._dump_args(args), \ self._dump_box(op.result) def _dump_args(self, boxes): @@ -809,14 +811,14 @@ return '<%s>' % (self.name,) def _list_all_operations(result, operations, omit_finish=True): - if omit_finish and operations[-1].opnum == rop.FINISH: + if omit_finish and operations[-1].getopnum() == rop.FINISH: # xxx obscure return result.extend(operations) for op in operations: - if op.is_guard() and op.descr: - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + if op.is_guard() and op.getdescr(): + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations _list_all_operations(result, ops, omit_finish) # ____________________________________________________________ @@ -885,7 +887,7 @@ self.aborted_count += 1 def entered(self): - self.enter_count += 1 + self.enter_count += 1 def compiled(self): self.compiled_count += 1 @@ -898,7 +900,7 @@ def add_new_loop(self, loop): self.loops.append(loop) - + # test read interface def get_all_loops(self): Modified: pypy/branch/jitffi/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/logger.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/logger.py Thu Sep 23 16:53:32 2010 @@ -79,27 +79,27 @@ debug_print('[' + args + ']') for i in range(len(operations)): op = operations[i] - if op.opnum == rop.DEBUG_MERGE_POINT: - loc = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + loc = op.getarg(0)._get_str() debug_print("debug_merge_point('%s')" % (loc,)) continue - args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args]) + args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())]) if op.result is not None: res = self.repr_of_arg(memo, op.result) + " = " else: res = "" is_guard = op.is_guard() - if op.descr is not None: - descr = op.descr + if op.getdescr() is not None: + descr = op.getdescr() if is_guard and self.guard_number: index = self.metainterp_sd.cpu.get_fail_descr_number(descr) r = "" % index else: r = self.repr_of_descr(descr) args += ', descr=' + r - if is_guard and op.fail_args is not None: + if is_guard and op.getfailargs() is not None: fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg) - for arg in op.fail_args]) + ']' + for arg in op.getfailargs()]) + ']' else: fail_args = '' debug_print(res + op.getopname() + Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimize.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimize.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimize.py Thu 
Sep 23 16:53:32 2010 @@ -43,7 +43,7 @@ finder.find_nodes_bridge(bridge) for old_loop_token in old_loop_tokens: if finder.bridge_matches(old_loop_token.specnodes): - bridge.operations[-1].descr = old_loop_token # patch jump target + bridge.operations[-1].setdescr(old_loop_token) # patch jump target optimize_bridge_1(metainterp_sd, bridge) return old_loop_token return None Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py Thu Sep 23 16:53:32 2010 @@ -144,7 +144,7 @@ def find_nodes(self, operations): for op in operations: - opnum = op.opnum + opnum = op.getopnum() for value, func in find_nodes_ops: if opnum == value: func(self, op) @@ -154,18 +154,20 @@ def find_nodes_default(self, op): if op.is_always_pure(): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: we can constant-fold - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping - for box in op.args: - self.getnode(box).mark_escaped() + for i in range(op.numargs()): + self.getnode(op.getarg(i)).mark_escaped() def find_nodes_no_escape(self, op): pass # for operations that don't escape their arguments @@ -178,53 +180,53 @@ def find_nodes_NEW_WITH_VTABLE(self, op): instnode = InstanceNode() - box = op.args[0] + box = op.getarg(0) assert isinstance(box, Const) instnode.knownclsbox = box self.nodes[op.result] = instnode def find_nodes_NEW(self, op): instnode = InstanceNode() - instnode.structdescr = op.descr + instnode.structdescr = op.getdescr() self.nodes[op.result] = instnode def find_nodes_NEW_ARRAY(self, op): - lengthbox = op.args[0] + lengthbox = op.getarg(0) lengthbox = self.get_constant_box(lengthbox) if lengthbox is None: return # var-sized arrays are not virtual arraynode = InstanceNode() arraynode.arraysize = lengthbox.getint() - arraynode.arraydescr = op.descr + arraynode.arraydescr = op.getdescr() self.nodes[op.result] = arraynode def find_nodes_ARRAYLEN_GC(self, op): - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.arraydescr is not None: resbox = ConstInt(arraynode.arraysize) self.set_constant_node(op.result, resbox) def find_nodes_GUARD_CLASS(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownclsbox = box def find_nodes_GUARD_VALUE(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownvaluebox = box def find_nodes_SETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) - fieldnode = self.getnode(op.args[1]) + instnode = self.getnode(op.getarg(0)) + fieldnode = self.getnode(op.getarg(1)) if instnode.escaped: fieldnode.mark_escaped() return # nothing to be gained from tracking the field - field = 
op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is None: instnode.curfields = {} @@ -232,10 +234,10 @@ instnode.add_escape_dependency(fieldnode) def find_nodes_GETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.escaped: return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is not None and field in instnode.curfields: fieldnode = instnode.curfields[field] @@ -254,13 +256,13 @@ find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC def find_nodes_SETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) - itemnode = self.getnode(op.args[2]) + arraynode = self.getnode(op.getarg(0)) + itemnode = self.getnode(op.getarg(2)) if arraynode.escaped: itemnode.mark_escaped() return # nothing to be gained from tracking the item @@ -270,12 +272,12 @@ arraynode.add_escape_dependency(itemnode) def find_nodes_GETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.escaped: return # nothing to be gained from tracking the item index = indexbox.getint() @@ -298,13 +300,15 @@ def find_nodes_JUMP(self, op): # only set up the 'unique' field of the InstanceNodes; # real handling comes later (build_result_specnodes() for loops). - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).set_unique_nodes() def find_nodes_FINISH(self, op): # only for bridges, and only for the ones that end in a 'return' # or 'raise'; all other cases end with a JUMP. - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).unique = UNIQUE_NO find_nodes_ops = _findall(NodeFinder, 'find_nodes_') @@ -324,7 +328,7 @@ def show(self): from pypy.jit.metainterp.viewnode import viewnodes, view op = self._loop.operations[-1] - assert op.opnum == rop.JUMP + assert op.getopnum() == rop.JUMP exitnodes = [self.getnode(arg) for arg in op.args] viewnodes(self.inputnodes, exitnodes) if hasattr(self._loop.token, "specnodes"): @@ -343,14 +347,14 @@ # Build the list of specnodes based on the result # computed by NodeFinder.find_nodes(). 
op = loop.operations[-1] - assert op.opnum == rop.JUMP - assert len(self.inputnodes) == len(op.args) + assert op.getopnum() == rop.JUMP + assert len(self.inputnodes) == op.numargs() while True: self.restart_needed = False specnodes = [] - for i in range(len(op.args)): + for i in range(op.numargs()): inputnode = self.inputnodes[i] - exitnode = self.getnode(op.args[i]) + exitnode = self.getnode(op.getarg(i)) specnodes.append(self.intersect(inputnode, exitnode)) if not self.restart_needed: break @@ -562,9 +566,9 @@ def bridge_matches(self, nextloop_specnodes): jump_op = self.jump_op - assert len(jump_op.args) == len(nextloop_specnodes) + assert jump_op.numargs() == len(nextloop_specnodes) for i in range(len(nextloop_specnodes)): - exitnode = self.getnode(jump_op.args[i]) + exitnode = self.getnode(jump_op.getarg(i)) if not nextloop_specnodes[i].matches_instance_node(exitnode): return False return True Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 23 16:53:32 2010 @@ -1,8 +1,8 @@ -from optimizer import Optimizer -from rewrite import OptRewrite -from intbounds import OptIntBounds -from virtualize import OptVirtualize -from heap import OptHeap +from pypy.jit.metainterp.optimizeopt.optimizer import Optimizer +from pypy.jit.metainterp.optimizeopt.rewrite import OptRewrite +from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds +from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize +from pypy.jit.metainterp.optimizeopt.heap import OptHeap from ccall import OptCCall def optimize_loop_1(metainterp_sd, loop, virtuals=True): Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py Thu Sep 23 16:53:32 2010 @@ -2,7 +2,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated -from optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization class CachedArrayItems(object): def __init__(self): @@ -45,7 +45,7 @@ op = self.lazy_setfields.get(descr, None) if op is None: return None - return self.getvalue(op.args[1]) + return self.getvalue(op.getarg(1)) return d.get(value, None) def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): @@ -105,7 +105,7 @@ if op.is_guard(): self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return - opnum = op.opnum + opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or opnum == rop.SETARRAYITEM_GC or opnum == rop.DEBUG_MERGE_POINT): @@ -117,7 +117,7 @@ if opnum == rop.CALL_ASSEMBLER: effectinfo = None else: - effectinfo = op.descr.get_extra_info() + effectinfo = op.getdescr().get_extra_info() if effectinfo is not None: # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -142,7 +142,7 @@ return self.force_all_lazy_setfields() elif op.is_final() or (not we_are_translated() and - op.opnum < 0): # escape() operations + op.getopnum() < 0): # escape() operations self.force_all_lazy_setfields() self.clean_caches() @@ -166,10 +166,11 @@ # - is_comparison() for cases like 
"int_eq/setfield_gc/guard_true" # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.opnum + opnum = prevop.getopnum() + lastop_args = lastop.getarglist() if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) - and prevop.result not in lastop.args): + and prevop.result not in lastop_args): newoperations[-2] = lastop newoperations[-1] = prevop @@ -189,9 +190,9 @@ # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is stored # into a field of a non-virtual object. - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) assert not value.is_virtual() # it must be a non-virtual - fieldvalue = self.getvalue(op.args[1]) + fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py pendingfields.append((descr, value.box, @@ -202,20 +203,20 @@ def force_lazy_setfield_if_necessary(self, op, value, write=False): try: - op1 = self.lazy_setfields[op.descr] + op1 = self.lazy_setfields[op.getdescr()] except KeyError: if write: - self.lazy_setfields_descrs.append(op.descr) + self.lazy_setfields_descrs.append(op.getdescr()) else: - if self.getvalue(op1.args[0]) is not value: - self.force_lazy_setfield(op.descr) + if self.getvalue(op1.getarg(0)) is not value: + self.force_lazy_setfield(op.getdescr()) def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) self.force_lazy_setfield_if_necessary(op, value) # check if the field was read from another getfield_gc just before # or has been written to recently - fieldvalue = self.read_cached_field(op.descr, value) + fieldvalue = self.read_cached_field(op.getdescr(), value) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return @@ -225,38 +226,38 @@ self.emit_operation(op) # FIXME: These might need constant propagation? # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.descr, value, fieldvalue) + self.cache_field_value(op.getdescr(), value, fieldvalue) def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.descr] = op + self.lazy_setfields[op.getdescr()] = op # remember the result of future reads of the field - self.cache_field_value(op.descr, value, fieldvalue, write=True) + self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - indexvalue = self.getvalue(op.args[1]) - fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) + value = self.getvalue(op.getarg(0)) + indexvalue = self.getvalue(op.getarg(1)) + fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return ###self.optimizer.optimize_default(op) self.emit_operation(op) # FIXME: These might need constant propagation? 
fieldvalue = self.getvalue(op.result) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue) def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[2]) - indexvalue = self.getvalue(op.args[1]) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(2)) + indexvalue = self.getvalue(op.getarg(1)) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 23 16:53:32 2010 @@ -1,6 +1,7 @@ -from optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0 from pypy.jit.metainterp.optimizeutil import _findall -from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ + IntLowerBound from pypy.jit.metainterp.history import Const, ConstInt from pypy.jit.metainterp.resoperation import rop, ResOperation @@ -9,7 +10,7 @@ remove redundant guards""" def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -25,29 +26,29 @@ b = v.intbound if b.has_lower and b.has_upper and b.lower == b.upper: v.make_constant(ConstInt(b.lower)) - + try: op = self.optimizer.producer[box] except KeyError: return - opnum = op.opnum + opnum = op.getopnum() for value, func in propagate_bounds_ops: if opnum == value: func(self, op) break - + def optimize_GUARD_TRUE(self, op): self.emit_operation(op) - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) - + r = self.getvalue(op.result) if v2.is_constant(): val = v2.box.getint() @@ -57,76 +58,76 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0,val)) - + def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) - + def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) 
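The bound propagation itself is unchanged here; as a rough standalone illustration of the interval arithmetic performed by the add_bound/mul_bound/intersect calls such as the one right after this, here is a simplified always-bounded interval (unlike the real IntBound, which also models open ends):

    class Interval(object):
        def __init__(self, lower, upper):
            assert lower <= upper
            self.lower = lower
            self.upper = upper
        def add_bound(self, other):
            # range of x + y given x in self and y in other
            return Interval(self.lower + other.lower, self.upper + other.upper)
        def intersect(self, other):
            # narrow self to the overlap; returns True if anything changed
            lower = max(self.lower, other.lower)
            upper = min(self.upper, other.upper)
            changed = (lower, upper) != (self.lower, self.upper)
            self.lower, self.upper = lower, upper
            return changed
        def known_lt(self, other):
            return self.upper < other.lower

    r = Interval(-100, 100)
    changed = r.intersect(Interval(0, 10).add_bound(Interval(1, 5)))
    assert changed and (r.lower, r.upper) == (1, 15)
    assert Interval(0, 3).known_lt(Interval(5, 9))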
r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) def optimize_INT_ADD_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_ADD and remove guard - op.opnum = rop.INT_ADD + op = op.copy_and_change(rop.INT_ADD) self.skip_nextop() - self.optimize_INT_ADD(op) + self.optimize_INT_ADD(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_SUB_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_SUB and remove guard - op.opnum = rop.INT_SUB + op = op.copy_and_change(rop.INT_SUB) self.skip_nextop() - self.optimize_INT_SUB(op) + self.optimize_INT_SUB(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_MUL and remove guard - op.opnum = rop.INT_MUL + op = op.copy_and_change(rop.INT_MUL) self.skip_nextop() - self.optimize_INT_MUL(op) + self.optimize_INT_MUL(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) - + def optimize_INT_LT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_ge(v2.intbound): @@ -135,8 +136,8 @@ self.emit_operation(op) def optimize_INT_GT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_le(v2.intbound): @@ -145,8 +146,8 @@ self.emit_operation(op) def optimize_INT_LE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_le(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): @@ -155,8 +156,8 @@ self.emit_operation(op) def optimize_INT_GE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_ge(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -165,134 +166,140 @@ self.emit_operation(op) def optimize_INT_EQ(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): 
self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) - else: + else: self.emit_operation(op) - + def optimize_INT_NE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - else: + else: self.emit_operation(op) - - def make_int_lt(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + + def optimize_ARRAYLEN_GC(self, op): + self.emit_operation(op) + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(0)) + + optimize_STRLEN = optimize_ARRAYLEN_GC + + def make_int_lt(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_gt(v1.intbound): - self.propagate_bounds_backward(args[1]) - + self.propagate_bounds_backward(box2) - def make_int_le(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_le(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_le(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_ge(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - def make_int_gt(self, args): - self.make_int_lt([args[1], args[0]]) + def make_int_gt(self, box1, box2): + self.make_int_lt(box2, box1) - def make_int_ge(self, args): - self.make_int_le([args[1], args[0]]) + def make_int_ge(self, box1, box2): + self.make_int_le(box2, box1) def propagate_bounds_INT_LT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) else: - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) else: - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_LE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) else: - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) else: - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_EQ(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_NE(self, op): r = self.getvalue(op.result) if r.is_constant(): if 
r.box.same_constant(CONST_0): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB @@ -300,4 +307,3 @@ optimize_ops = _findall(OptIntBounds, 'optimize_') propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_') - Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 23 16:53:32 2010 @@ -11,17 +11,17 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int -from intutils import IntBound, IntUnbounded +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays -LEVEL_CONSTANT = '\x03' +LEVEL_CONSTANT = '\x03' import sys MAXINT = sys.maxint MININT = -sys.maxint - 1 - + class OptValue(object): _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -36,7 +36,7 @@ if isinstance(box, Const): self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT - + def force_box(self): return self.box @@ -171,7 +171,7 @@ def new_const_item(self, arraydescr): return self.optimizer.new_const_item(arraydescr) - + def pure(self, 
opnum, args, result): op = ResOperation(opnum, args, result) self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op @@ -184,10 +184,10 @@ def setup(self, virtuals): pass - + class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True): + def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -199,10 +199,8 @@ self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] - - if len(optimizations) == 0: - self.first_optimization = self - else: + + if optimizations: self.first_optimization = optimizations[0] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] @@ -210,6 +208,8 @@ for o in optimizations: o.optimizer = self o.setup(virtuals) + else: + self.first_optimization = self def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS) @@ -308,7 +308,7 @@ def propagate_forward(self, op): self.producer[op.result] = op - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -323,15 +323,15 @@ self._emit_operation(op) def _emit_operation(self, op): - for i in range(len(op.args)): - arg = op.args[i] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: box = self.values[arg].force_box() - op.args[i] = box + op.setarg(i, box) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.store_final_boxes_in_guard(op) + op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True elif op.returns_bool_result(): @@ -340,7 +340,7 @@ def store_final_boxes_in_guard(self, op): ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) newboxes = modifier.finish(self.values, self.pendingfields) @@ -348,49 +348,54 @@ compile.giveup() descr.store_final_boxes(op, newboxes) # - if op.opnum == rop.GUARD_VALUE: - if self.getvalue(op.args[0]) in self.bool_boxes: + if op.getopnum() == rop.GUARD_VALUE: + if self.getvalue(op.getarg(0)) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. - # This is done after the operation is emitted, to let - # store_final_boxes_in_guard set the guard_opnum field - # of the descr to the original rop.GUARD_VALUE. - constvalue = op.args[1].getint() + # This is done after the operation is emitted to let + # store_final_boxes_in_guard set the guard_opnum field of the + # descr to the original rop.GUARD_VALUE. + constvalue = op.getarg(1).getint() if constvalue == 0: opnum = rop.GUARD_FALSE elif constvalue == 1: opnum = rop.GUARD_TRUE else: raise AssertionError("uh?") - op.opnum = opnum - op.args = [op.args[0]] + newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) + newop.setfailargs(op.getfailargs()) + return newop else: # a real GUARD_VALUE. Make it use one counter per value. 
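A recurring consequence of the new interface shows up here and in the *_OVF handlers above: instead of assigning op.opnum or op.args in place, the optimizer now builds a replacement operation and returns or re-stores it. A rough standalone sketch of that copy-and-change style, with MiniOp as a made-up minimal class rather than the real one:

    class MiniOp(object):
        def __init__(self, opnum, args, result=None):
            self.opnum = opnum
            self.args = list(args)
            self.result = result
        def copy_and_change(self, opnum=None, args=None, result=None):
            # shallow copy meant to be used in place of self
            return MiniOp(opnum if opnum is not None else self.opnum,
                          args if args is not None else self.args,
                          result if result is not None else self.result)

    GUARD_VALUE, GUARD_TRUE = 10, 11
    guard = MiniOp(GUARD_VALUE, ['box_cond', 'const_1'])
    # turn guard_value(bool, 1) into guard_true(bool), as the code above does
    newguard = guard.copy_and_change(opnum=GUARD_TRUE, args=[guard.args[0]])
    assert (newguard.opnum, newguard.args) == (GUARD_TRUE, ['box_cond'])
    assert guard.opnum == GUARD_VALUE   # the original operation is left untouched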
descr.make_a_counter_per_value(op) + return op def make_args_key(self, op): - args = op.args[:] - for i in range(len(args)): - arg = args[i] + args = [] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: - args[i] = self.values[arg].get_key_box() - args.append(ConstInt(op.opnum)) + args.append(self.values[arg].get_key_box()) + else: + args.append(arg) + args.append(ConstInt(op.getopnum())) return args - + def optimize_default(self, op): canfold = op.is_always_pure() is_ovf = op.is_ovf() if is_ovf: nextop = self.loop.operations[self.i + 1] - canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW + canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW if canfold: - for arg in op.args: - if self.get_constant_box(arg) is None: + for i in range(op.numargs()): + if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.make_constant(op.result, resbox.constbox()) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard @@ -399,8 +404,8 @@ # did we do the exact same operation already? args = self.make_args_key(op) oldop = self.pure_operations.get(args, None) - if oldop is not None and oldop.descr is op.descr: - assert oldop.opnum == op.opnum + if oldop is not None and oldop.getdescr() is op.getdescr(): + assert oldop.getopnum() == op.getopnum() self.make_equal_to(op.result, self.getvalue(oldop.result)) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 23 16:53:32 2010 @@ -1,11 +1,11 @@ -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.resoperation import rop, ResOperation class OptRewrite(Optimization): - """Rewrite operations into equvivialent, cheeper operations. + """Rewrite operations into equivalent, cheaper operations. This includes already executed operations and constants. 
""" @@ -14,7 +14,7 @@ if self.find_rewritable_bool(op, args): return - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -24,7 +24,7 @@ def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): value = self.getvalue(oldop.result) if value.is_constant(): if value.box.same_constant(CONST_1): @@ -39,7 +39,7 @@ def find_rewritable_bool(self, op, args): try: - oldopnum = opboolinvers[op.opnum] + oldopnum = opboolinvers[op.getopnum()] targs = [args[0], args[1], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -47,17 +47,17 @@ pass try: - oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) return True except KeyError: pass try: - oldopnum = opboolinvers[opboolreflex[op.opnum]] + oldopnum = opboolinvers[opboolreflex[op.getopnum()]] targs = [args[1], args[0], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -67,16 +67,16 @@ return False def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) else: self.emit_operation(op) def optimize_INT_OR(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null(): self.make_equal_to(op.result, v2) elif v2.is_null(): @@ -85,20 +85,20 @@ self.emit_operation(op) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v2.is_constant() and v2.box.getint() == 0: self.make_equal_to(op.result, v1) else: self.emit_operation(op) # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) + self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 0 the result is the other side. if v1.is_constant() and v1.box.getint() == 0: @@ -109,12 +109,12 @@ self.emit_operation(op) # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) + self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 1 the result is the other side. 
if v1.is_constant() and v1.box.getint() == 1: @@ -128,18 +128,20 @@ self.emit_operation(op) def optimize_CALL_PURE(self, op): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: constant-fold away - self.make_constant(op.result, op.args[0]) + self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - op.descr)) + args = op.getarglist()[1:] + self.emit_operation(ResOperation(rop.CALL, args, op.result, + op.getdescr())) def optimize_guard(self, op, constbox, emit_operation=True): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_constant(): box = value.box assert isinstance(box, Const) @@ -151,7 +153,7 @@ value.make_constant(constbox) def optimize_GUARD_ISNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_null(): return elif value.is_nonnull(): @@ -160,7 +162,7 @@ value.make_constant(self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_NONNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_nonnull(): return elif value.is_null(): @@ -169,25 +171,25 @@ value.make_nonnull(len(self.optimizer.newoperations) - 1) def optimize_GUARD_VALUE(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) emit_operation = True if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. # replace the original guard with a guard_value old_guard_op = self.optimizer.newoperations[value.last_guard_index] - old_opnum = old_guard_op.opnum - old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(old_guard_op) + descr.make_a_counter_per_value(new_guard_op) emit_operation = False - constbox = op.args[1] + constbox = op.getarg(1) assert isinstance(constbox, Const) self.optimize_guard(op, constbox, emit_operation) @@ -198,8 +200,8 @@ self.optimize_guard(op, CONST_0) def optimize_GUARD_CLASS(self, op): - value = self.getvalue(op.args[0]) - expectedclassbox = op.args[1] + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: @@ -213,15 +215,16 @@ # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. old_guard_op = self.optimizer.newoperations[value.last_guard_index] - if old_guard_op.opnum == rop.GUARD_NONNULL: + if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. 
- old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS emit_operation = False @@ -239,18 +242,18 @@ self.optimizer.exception_might_have_happened = False def optimize_CALL_LOOPINVARIANT(self, op): - funcvalue = self.getvalue(op.args[0]) + funcvalue = self.getvalue(op.getarg(0)) if not funcvalue.is_constant(): self.emit_operation(op) return - key = make_hashable_int(op.args[0].getint()) + key = make_hashable_int(op.getarg(0).getint()) resvalue = self.optimizer.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this - op.opnum = rop.CALL + op = op.copy_and_change(rop.CALL) self.emit_operation(op) resvalue = self.getvalue(op.result) self.optimizer.loop_invariant_results[key] = resvalue @@ -265,17 +268,17 @@ self.emit_operation(op) def optimize_INT_IS_TRUE(self, op): - if self.getvalue(op.args[0]) in self.optimizer.bool_boxes: - self.make_equal_to(op.result, self.getvalue(op.args[0])) + if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes: + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) return - self._optimize_nullness(op, op.args[0], True) + self._optimize_nullness(op, op.getarg(0), True) def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.args[0], False) + self._optimize_nullness(op, op.getarg(0), False) def _optimize_oois_ooisnot(self, op, expect_isnot): - value0 = self.getvalue(op.args[0]) - value1 = self.getvalue(op.args[1]) + value0 = self.getvalue(op.getarg(0)) + value1 = self.getvalue(op.getarg(1)) if value0.is_virtual(): if value1.is_virtual(): intres = (value0 is value1) ^ expect_isnot @@ -285,9 +288,9 @@ elif value1.is_virtual(): self.make_constant_int(op.result, expect_isnot) elif value1.is_null(): - self._optimize_nullness(op, op.args[0], expect_isnot) + self._optimize_nullness(op, op.getarg(0), expect_isnot) elif value0.is_null(): - self._optimize_nullness(op, op.args[1], expect_isnot) + self._optimize_nullness(op, op.getarg(1), expect_isnot) elif value0 is value1: self.make_constant_int(op.result, not expect_isnot) else: @@ -308,10 +311,10 @@ self._optimize_oois_ooisnot(op, False) def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) + checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr()) result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, realclassbox, checkclassbox) Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ 
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 23 16:53:32 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated -from optimizer import * +from pypy.jit.metainterp.optimizeopt.optimizer import * class AbstractVirtualValue(OptValue): @@ -263,7 +263,7 @@ def setup(self, virtuals): if not virtuals: return - + inputargs = self.optimizer.loop.inputargs specnodes = self.optimizer.loop.token.specnodes assert len(inputargs) == len(specnodes) @@ -290,18 +290,18 @@ def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] - target_loop_token = orgop.descr + target_loop_token = orgop.getdescr() assert isinstance(target_loop_token, LoopToken) specnodes = target_loop_token.specnodes - assert len(op.args) == len(specnodes) + assert op.numargs() == len(specnodes) for i in range(len(specnodes)): - value = self.getvalue(op.args[i]) + value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) - op.args = exitargs[:] + op = op.copy_and_change(op.getopnum(), args=exitargs[:]) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) # # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -327,17 +327,17 @@ # typically a PyPy PyFrame, and now is the end of its execution, so # forcing it now does not have catastrophic effects. vrefinfo = self.optimizer.metainterp_sd.virtualref_info - # op.args[1] should really never point to null here + # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, + op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None, descr = vrefinfo.descr_forced) self.optimize_SETFIELD_GC(op1) # - set 'virtual_token' to TOKEN_NONE - args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] + args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)] op1 = ResOperation(rop.SETFIELD_GC, args, None, descr = vrefinfo.descr_virtual_token) self.optimize_SETFIELD_GC(op1) - # Note that in some cases the virtual in op.args[1] has been forced + # Note that in some cases the virtual in op.getarg(1) has been forced # already. This is fine. In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will # trigger a ResumeGuardForcedDescr.handle_async_forcing() which @@ -345,11 +345,11 @@ # was already forced). 
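The virtual-object handling below is unchanged in substance: while an allocation stays virtual, its fields live in a plain mapping inside the optimizer and the corresponding GETFIELD_GC/SETFIELD_GC operations never reach the backend. A rough standalone sketch of that idea, with made-up helper names rather than the real classes:

    class VirtualObject(object):
        def __init__(self):
            self.fields = {}          # field descr -> value, while still virtual
        def setfield(self, descr, value):
            self.fields[descr] = value
        def getfield(self, descr):
            return self.fields[descr]

    emitted = []

    def optimize_setfield(obj, descr, value):
        if isinstance(obj, VirtualObject):
            obj.setfield(descr, value)      # no operation emitted
        else:
            emitted.append(('setfield_gc', obj, descr, value))

    v = VirtualObject()
    optimize_setfield(v, 'descr_x', 42)
    assert v.getfield('descr_x') == 42 and emitted == []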
def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): # optimizefindnode should ensure that fieldvalue is found assert isinstance(value, AbstractVirtualValue) - fieldvalue = value.getfield(op.descr, None) + fieldvalue = value.getfield(op.getdescr(), None) assert fieldvalue is not None self.make_equal_to(op.result, fieldvalue) else: @@ -362,36 +362,36 @@ optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) if value.is_virtual(): - value.setfield(op.descr, fieldvalue) + value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_NEW_WITH_VTABLE(self, op): - self.make_virtual(op.args[0], op.result, op) + self.make_virtual(op.getarg(0), op.result, op) def optimize_NEW(self, op): - self.make_vstruct(op.descr, op.result, op) + self.make_vstruct(op.getdescr(), op.result, op) def optimize_NEW_ARRAY(self, op): - sizebox = self.get_constant_box(op.args[0]) + sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): + if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.descr) - self.make_varray(op.descr, sizebox.getint(), op.result, op) + descr=op.getdescr()) + self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: ###self.optimize_default(op) self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): self.make_constant_int(op.result, value.getlength()) else: @@ -400,9 +400,9 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: itemvalue = value.getitem(indexbox.getint()) self.make_equal_to(op.result, itemvalue) @@ -416,22 +416,22 @@ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC def optimize_SETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) return value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) + source_value = self.getvalue(op.getarg(2)) + dest_value = self.getvalue(op.getarg(3)) + source_start_box = self.get_constant_box(op.getarg(4)) + dest_start_box = self.get_constant_box(op.getarg(5)) + length = self.get_constant_box(op.getarg(6)) if (source_value.is_virtual() and source_start_box and dest_start_box 
and length and dest_value.is_virtual()): # XXX optimize the case where dest value is not virtual, @@ -444,13 +444,14 @@ return if length and length.getint() == 0: return # 0-length arraycopy - descr = op.args[0] + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, + args = op.getarglist()[1:] + self.emit_operation(ResOperation(rop.CALL, args, op.result, descr)) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py Thu Sep 23 16:53:32 2010 @@ -159,7 +159,7 @@ if got_type == history.INT: self.registers_i[target_index] = resultbox elif got_type == history.REF: - #debug_print(' ->', + #debug_print(' ->', # llmemory.cast_ptr_to_adr(resultbox.getref_base())) self.registers_r[target_index] = resultbox elif got_type == history.FLOAT: @@ -446,7 +446,7 @@ def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) - self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, + self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, sbox, sizebox) abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, sizebox) @@ -1004,7 +1004,7 @@ resumedescr = compile.ResumeGuardDescr(metainterp_sd, original_greenkey) guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) + descr=resumedescr) virtualizable_boxes = None if metainterp.jitdriver_sd.virtualizable_info is not None: virtualizable_boxes = metainterp.virtualizable_boxes @@ -1463,7 +1463,7 @@ resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes) return resbox - def _record_helper_pure(self, opnum, resbox, descr, *argboxes): + def _record_helper_pure(self, opnum, resbox, descr, *argboxes): canfold = self._all_constants(*argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1472,7 +1472,7 @@ resbox = resbox.nonconstbox() # ensure it is a Box return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) - def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): + def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1485,7 +1485,7 @@ assert resbox is None or isinstance(resbox, Box) # record the operation profiler = self.staticdata.profiler - profiler.count_ops(opnum, RECORDED_OPS) + profiler.count_ops(opnum, RECORDED_OPS) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox @@ -1667,7 +1667,7 @@ # Search in current_merge_points for original_boxes with compatible # green keys, representing the beginning of the same loop as the one - # we end now. + # we end now. 
num_green_args = self.jitdriver_sd.num_green_args for j in range(len(self.current_merge_points)-1, -1, -1): @@ -1922,7 +1922,7 @@ vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE call_may_force_op = self.history.operations.pop() - assert call_may_force_op.opnum == rop.CALL_MAY_FORCE + assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) self.history.operations.append(call_may_force_op) @@ -2088,10 +2088,10 @@ """ Patch a CALL into a CALL_PURE. """ op = self.history.operations[-1] - assert op.opnum == rop.CALL + assert op.getopnum() == rop.CALL resbox_as_const = resbox.constbox() - for arg in op.args: - if not isinstance(arg, Const): + for i in range(op.numargs()): + if not isinstance(op.getarg(i), Const): break else: # all-constants: remove the CALL operation now and propagate a @@ -2100,8 +2100,8 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - op.opnum = rop.CALL_PURE - op.args = [resbox_as_const] + op.args + newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist()) + self.history.operations[-1] = newop return resbox def direct_assembler_call(self, targetjitdriver_sd): @@ -2109,10 +2109,11 @@ patching the CALL_MAY_FORCE that occurred just now. """ op = self.history.operations.pop() - assert op.opnum == rop.CALL_MAY_FORCE + assert op.getopnum() == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - greenargs = op.args[1:num_green_args+1] - args = op.args[num_green_args+1:] + arglist = op.getarglist() + greenargs = arglist[1:num_green_args+1] + args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: @@ -2122,9 +2123,7 @@ # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) - op.opnum = rop.CALL_ASSEMBLER - op.args = args - op.descr = token + op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ Modified: pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py Thu Sep 23 16:53:32 2010 @@ -1,42 +1,90 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized -class ResOperation(object): - """The central ResOperation class, representing one operation.""" +def ResOperation(opnum, args, result, descr=None): + cls = opclasses[opnum] + op = cls(result) + op.initarglist(args) + if descr is not None: + assert isinstance(op, ResOpWithDescr) + op.setdescr(descr) + return op + - # for 'guard_*' - fail_args = None +class AbstractResOp(object): + """The central ResOperation class, representing one operation.""" # debug name = "" pc = 0 - def __init__(self, opnum, args, result, descr=None): - make_sure_not_resized(args) - assert isinstance(opnum, int) - self.opnum = opnum - self.args = list(args) - make_sure_not_resized(self.args) - assert not isinstance(result, list) + def __init__(self, result): self.result = result - self.setdescr(descr) + + # methods 
implemented by each concrete class + # ------------------------------------------ + + def getopnum(self): + raise NotImplementedError + + # methods implemented by the arity mixins + # --------------------------------------- + + def initarglist(self, args): + "This is supposed to be called only just after the ResOp has been created" + raise NotImplementedError + + def getarglist(self): + raise NotImplementedError + + def getarg(self, i): + raise NotImplementedError + + def setarg(self, i, box): + raise NotImplementedError + + def numargs(self): + raise NotImplementedError + + + # methods implemented by GuardResOp + # --------------------------------- + + def getfailargs(self): + return None + + def setfailargs(self, fail_args): + raise NotImplementedError + + # methods implemented by ResOpWithDescr + # ------------------------------------- + + def getdescr(self): + return None def setdescr(self, descr): - # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt - # instance provided by the backend holding details about the type - # of the operation. It must inherit from AbstractDescr. The - # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), - # cpu.calldescrof(), and cpu.typedescrof(). - from pypy.jit.metainterp.history import check_descr - check_descr(descr) - self.descr = descr + raise NotImplementedError + + # common methods + # -------------- + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + "shallow copy: the returned operation is meant to be used in place of self" + if args is None: + args = self.getarglist() + if result is None: + result = self.result + if descr is None: + descr = self.getdescr() + newop = ResOperation(opnum, args, result, descr) + return newop def clone(self): - descr = self.descr + args = self.getarglist() + descr = self.getdescr() if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.opnum, self.args, self.result, descr) - op.fail_args = self.fail_args + op = ResOperation(self.getopnum(), args, self.result, descr) if not we_are_translated(): op.name = self.name op.pc = self.pc @@ -55,82 +103,271 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self.descr is None or we_are_translated(): + args = self.getarglist() + descr = self.getdescr() + if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args])) + ', '.join([str(a) for a in args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args]), self.descr) + ', '.join([str(a) for a in args]), descr) def getopname(self): try: - return opname[self.opnum].lower() + return opname[self.getopnum()].lower() except KeyError: - return '<%d>' % self.opnum + return '<%d>' % self.getopnum() def is_guard(self): - return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST + return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST + return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST def is_guard_exception(self): - return (self.opnum == rop.GUARD_EXCEPTION or - self.opnum == rop.GUARD_NO_EXCEPTION) + return (self.getopnum() == rop.GUARD_EXCEPTION or + self.getopnum() == rop.GUARD_NO_EXCEPTION) def is_guard_overflow(self): - return (self.opnum == rop.GUARD_OVERFLOW or - self.opnum == rop.GUARD_NO_OVERFLOW) + return (self.getopnum() == rop.GUARD_OVERFLOW or + self.getopnum() == 
rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST + return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST + return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST def can_raise(self): - return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST + return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST def is_ovf(self): - return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST + return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST def is_comparison(self): return self.is_always_pure() and self.returns_bool_result() def is_final(self): - return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST + return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST def returns_bool_result(self): - opnum = self.opnum + opnum = self.getopnum() if we_are_translated(): assert opnum >= 0 elif opnum < 0: return False # for tests return opboolresult[opnum] + +# =================== +# Top of the hierachy +# =================== + +class PlainResOp(AbstractResOp): + pass + +class ResOpWithDescr(AbstractResOp): + + _descr = None + + def getdescr(self): + return self._descr + + def setdescr(self, descr): + # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt + # instance provided by the backend holding details about the type + # of the operation. It must inherit from AbstractDescr. The + # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), + # cpu.calldescrof(), and cpu.typedescrof(). + from pypy.jit.metainterp.history import check_descr + check_descr(descr) + self._descr = descr + +class GuardResOp(ResOpWithDescr): + + _fail_args = None + + def getfailargs(self): + return self._fail_args + + def setfailargs(self, fail_args): + self._fail_args = fail_args + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) + newop.setfailargs(self.getfailargs()) + return newop + + def clone(self): + newop = AbstractResOp.clone(self) + newop.setfailargs(self.getfailargs()) + return newop + + +# ============ +# arity mixins +# ============ + +class NullaryOp(object): + _mixin_ = True + + def initarglist(self, args): + assert len(args) == 0 + + def getarglist(self): + return [] + + def numargs(self): + return 0 + + def getarg(self, i): + raise IndexError + + def setarg(self, i, box): + raise IndexError + + +class UnaryOp(object): + _mixin_ = True + _arg0 = None + + def initarglist(self, args): + assert len(args) == 1 + self._arg0, = args + + def getarglist(self): + return [self._arg0] + + def numargs(self): + return 1 + + def getarg(self, i): + if i == 0: + return self._arg0 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + else: + raise IndexError + + +class BinaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + + def initarglist(self, args): + assert len(args) == 2 + self._arg0, self._arg1 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 2 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + else: + raise IndexError + + def getarglist(self): + return [self._arg0, self._arg1] + + +class TernaryOp(object): + 
_mixin_ = True + _arg0 = None + _arg1 = None + _arg2 = None + + def initarglist(self, args): + assert len(args) == 3 + self._arg0, self._arg1, self._arg2 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 3 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + elif i == 2: + return self._arg2 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + elif i == 2: + self._arg2 = box + else: + raise IndexError + +class N_aryOp(object): + _mixin_ = True + _args = None + + def initarglist(self, args): + self._args = args + + def getarglist(self): + return self._args + + def numargs(self): + return len(self._args) + + def getarg(self, i): + return self._args[i] + + def setarg(self, i, box): + self._args[i] = box + + # ____________________________________________________________ _oplist = [ '_FINAL_FIRST', - 'JUMP', - 'FINISH', + 'JUMP/*d', + 'FINISH/*d', '_FINAL_LAST', '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', - 'GUARD_TRUE', - 'GUARD_FALSE', - 'GUARD_VALUE', - 'GUARD_CLASS', - 'GUARD_NONNULL', - 'GUARD_ISNULL', - 'GUARD_NONNULL_CLASS', + 'GUARD_TRUE/1d', + 'GUARD_FALSE/1d', + 'GUARD_VALUE/2d', + 'GUARD_CLASS/2d', + 'GUARD_NONNULL/1d', + 'GUARD_ISNULL/1d', + 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION', - 'GUARD_EXCEPTION', - 'GUARD_NO_OVERFLOW', - 'GUARD_OVERFLOW', - 'GUARD_NOT_FORCED', + 'GUARD_NO_EXCEPTION/0d', + 'GUARD_EXCEPTION/1d', + 'GUARD_NO_OVERFLOW/0d', + 'GUARD_OVERFLOW/0d', + 'GUARD_NOT_FORCED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -218,20 +455,20 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', 'NEWUNICODE/1', - #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) + #'RUNTIMENEW/1', # ootype operation + 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend - 'CALL_C', # call directly C code from here (a function addres comes first) + 'CALL_C/*', # call directly C code from here (a function addres comes first) '_CANRAISE_FIRST', # ----- start of can_raise operations ----- - 'CALL', - 'CALL_ASSEMBLER', # call already compiled assembler - 'CALL_MAY_FORCE', - 'CALL_LOOPINVARIANT', + 'CALL/*d', + 'CALL_ASSEMBLER/*d', # call already compiled assembler + 'CALL_MAY_FORCE/*d', + 'CALL_LOOPINVARIANT/*d', #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation - 'CALL_PURE', # removed before it's passed to the backend + 'CALL_PURE/*d', # removed before it's passed to the backend # CALL_PURE(result, func, arg_1,..,arg_n) '_CANRAISE_LAST', # ----- end of can_raise operations ----- @@ -248,6 +485,7 @@ class rop(object): pass +opclasses = [] # mapping numbers to the concrete ResOp class opname = {} # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -262,16 +500,62 @@ name, arity = name.split('/') withdescr = 'd' in arity boolresult = 'b' in arity - arity = int(arity.rstrip('db')) + arity = arity.rstrip('db') + if arity == '*': + arity = -1 + else: + arity = int(arity) else: arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): opname[i] = name + cls = 
create_class_for_op(name, i, arity, withdescr) + else: + cls = None + opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) opboolresult.append(boolresult) - assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + +def get_base_class(mixin, base): + try: + return get_base_class.cache[(mixin, base)] + except KeyError: + arity_name = mixin.__name__[:-2] # remove the trailing "Op" + name = arity_name + base.__name__ # something like BinaryPlainResOp + bases = (mixin, base) + cls = type(name, bases, {}) + get_base_class.cache[(mixin, base)] = cls + return cls +get_base_class.cache = {} + +def create_class_for_op(name, opnum, arity, withdescr): + arity2mixin = { + 0: NullaryOp, + 1: UnaryOp, + 2: BinaryOp, + 3: TernaryOp + } + + is_guard = name.startswith('GUARD') + if is_guard: + assert withdescr + baseclass = GuardResOp + elif withdescr: + baseclass = ResOpWithDescr + else: + baseclass = PlainResOp + mixin = arity2mixin.get(arity, N_aryOp) + + def getopnum(self): + return opnum + + cls_name = '%s_OP' % name + bases = (get_base_class(mixin, baseclass),) + dic = {'getopnum': getopnum} + return type(cls_name, bases, dic) setup(__name__ == '__main__') # print out the table when run directly del _oplist Modified: pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py Thu Sep 23 16:53:32 2010 @@ -11,15 +11,17 @@ from pypy.jit.metainterp.history import AbstractDescr # change ARRAYCOPY to call, so we don't have to pass around # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. 
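The hunk below is the mechanical part of this change: every direct attribute
access (op.opnum, op.args, op.descr, op.fail_args) becomes a call on the
accessor API introduced in resoperation.py above.  As a self-contained sketch
of that API in use (the helper and its name are made up for illustration, they
are not part of the patch):

    def describe(op):
        # old style would have read op.opnum, op.args, op.descr, op.fail_args
        parts = [str(op.getarg(i)) for i in range(op.numargs())]
        if op.getdescr() is not None:
            parts.append('descr=%r' % (op.getdescr(),))
        if op.is_guard() and op.getfailargs() is not None:
            parts.append('fail_args=%s' % (op.getfailargs(),))
        return '%s(%s)' % (op.getopname(), ', '.join(parts))
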
- if op.opnum == rop.ARRAYCOPY: - descr = op.args[0] + if op.getopnum() == rop.ARRAYCOPY: + descr = op.getarg(0) assert isinstance(descr, AbstractDescr) - op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr) - elif op.opnum == rop.CALL_PURE: - op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) - elif op.opnum == rop.VIRTUAL_REF: - op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) - elif op.opnum == rop.VIRTUAL_REF_FINISH: + args = op.getarglist()[1:] + op = ResOperation(rop.CALL, args, op.result, descr=descr) + elif op.getopnum() == rop.CALL_PURE: + args = op.getarglist()[1:] + op = ResOperation(rop.CALL, args, op.result, op.getdescr()) + elif op.getopnum() == rop.VIRTUAL_REF: + op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) + elif op.getopnum() == rop.VIRTUAL_REF_FINISH: return [] return [op] @@ -36,7 +38,7 @@ newoperations = [] for op in loop.operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, memo) newboxes = modifier.finish(EMPTY_VALUES) Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py Thu Sep 23 16:53:32 2010 @@ -6,7 +6,7 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ LoopToken -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory @@ -16,17 +16,29 @@ class ParseError(Exception): pass - class Boxes(object): pass +class ESCAPE_OP(N_aryOp, ResOpWithDescr): + + OPNUM = -123 + + def __init__(self, opnum, args, result, descr=None): + assert opnum == self.OPNUM + self.result = result + self.initarglist(args) + self.setdescr(descr) + + def getopnum(self): + return self.OPNUM + class ExtendedTreeLoop(TreeLoop): def getboxes(self): def opboxes(operations): for op in operations: yield op.result - for box in op.args: + for box in op.getarglist(): yield box def allboxes(): for box in self.inputargs: @@ -171,7 +183,7 @@ opnum = getattr(rop, opname.upper()) except AttributeError: if opname == 'escape': - opnum = -123 + opnum = ESCAPE_OP.OPNUM else: raise ParseError("unknown op: %s" % opname) endnum = line.rfind(')') @@ -228,6 +240,12 @@ descr = self.looptoken return opnum, args, descr, fail_args + def create_op(self, opnum, args, result, descr): + if opnum == ESCAPE_OP.OPNUM: + return ESCAPE_OP(opnum, args, result, descr) + else: + return ResOperation(opnum, args, result, descr) + def parse_result_op(self, line): res, op = line.split("=", 1) res = res.strip() @@ -237,14 +255,16 @@ raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) self.vars[res] = rvar - res = ResOperation(opnum, args, rvar, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, rvar, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_op_no_result(self, line): opnum, args, descr, fail_args = self.parse_op(line) - res = ResOperation(opnum, args, None, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, 
None, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_next_op(self, line): Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py Thu Sep 23 16:53:32 2010 @@ -296,7 +296,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py Thu Sep 23 16:53:32 2010 @@ -100,8 +100,8 @@ debug_merge_point("info") ''' loop, oloop = self.reparse(inp, check_equal=False) - assert loop.operations[0].args[0]._get_str() == 'info' - assert oloop.operations[0].args[0]._get_str() == 'info' + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert oloop.operations[0].getarg(0)._get_str() == 'info' def test_floats(self): inp = ''' Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py Thu Sep 23 16:53:32 2010 @@ -178,7 +178,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 2 # x, y (in some order) assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], history.BoxInt) Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py Thu Sep 23 16:53:32 2010 @@ -16,10 +16,10 @@ """ loop = parse(x) assert len(loop.operations) == 3 - assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, + assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 - assert loop.operations[-1].descr + assert loop.operations[-1].getdescr() def test_const_ptr_subops(): x = """ @@ -30,8 +30,8 @@ vtable = lltype.nullptr(S) loop = parse(x, None, locals()) assert len(loop.operations) == 1 - assert loop.operations[0].descr - assert loop.operations[0].fail_args == [] + assert loop.operations[0].getdescr() + assert loop.operations[0].getfailargs() == [] def test_descr(): class Xyz(AbstractDescr): @@ -43,7 +43,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_after_fail(): x = """ @@ -64,7 +64,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_boxname(): x = """ @@ -111,7 +111,7 @@ TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) loop = parse(x, None, {'func_ptr' : NULL}) - assert 
loop.operations[0].args[0].value == NULL + assert loop.operations[0].getarg(0).value == NULL def test_jump_target(): x = ''' @@ -119,7 +119,7 @@ jump() ''' loop = parse(x) - assert loop.operations[0].descr is loop.token + assert loop.operations[0].getdescr() is loop.token def test_jump_target_other(): looptoken = LoopToken() @@ -128,7 +128,7 @@ jump(descr=looptoken) ''' loop = parse(x, namespace=locals()) - assert loop.operations[0].descr is looptoken + assert loop.operations[0].getdescr() is looptoken def test_floats(): x = ''' @@ -136,7 +136,7 @@ f1 = float_add(f0, 3.5) ''' loop = parse(x) - assert isinstance(loop.operations[0].args[0], BoxFloat) + assert isinstance(loop.operations[0].getarg(0), BoxFloat) def test_debug_merge_point(): x = ''' @@ -147,10 +147,10 @@ debug_merge_point('(stuff) #1') ''' loop = parse(x) - assert loop.operations[0].args[0]._get_str() == 'info' - assert loop.operations[1].args[0]._get_str() == 'info' - assert loop.operations[2].args[0]._get_str() == " info" - assert loop.operations[3].args[0]._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert loop.operations[1].getarg(0)._get_str() == 'info' + assert loop.operations[2].getarg(0)._get_str() == " info" + assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1" def test_descr_with_obj_print(): Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 23 16:53:32 2010 @@ -33,7 +33,7 @@ self.profiler = EmptyProfiler() self.options = Fake() self.globaldata = Fake() - + def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -42,7 +42,7 @@ opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), None) fdescr = ResumeGuardDescr(None, None) - op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr) + op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr) # setup rd data fi0 = resume.FrameInfo(None, "code0", 11) fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33) @@ -50,11 +50,11 @@ fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1]) # opt.store_final_boxes_in_guard(op) - if op.fail_args == [b0, b1]: + if op.getfailargs() == [b0, b1]: assert fdescr.rd_numb.nums == [tag(1, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)] else: - assert op.fail_args == [b1, b0] + assert op.getfailargs() == [b1, b0] assert fdescr.rd_numb.nums == [tag(0, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)] assert fdescr.rd_virtuals is None @@ -75,7 +75,7 @@ assert lst3 == [LLtypeMixin.valuedescr] lst4 = virt1._get_field_descr_list() assert lst3 is lst4 - + virt2 = virtualize.AbstractVirtualStructValue(opt, None) lst5 = virt2._get_field_descr_list() assert lst5 is lst1 @@ -140,24 +140,26 @@ print '%-39s| %s' % (txt1[:39], txt2[:39]) txt1 = txt1[39:] txt2 = txt2[39:] - assert op1.opnum == op2.opnum - assert len(op1.args) == len(op2.args) - for x, y in zip(op1.args, op2.args): + assert op1.getopnum() == op2.getopnum() + assert op1.numargs() == op2.numargs() + for i in range(op1.numargs()): + x = op1.getarg(i) + y = op2.getarg(i) assert x == remap.get(y, y) if op2.result in remap: assert op1.result == remap[op2.result] else: remap[op2.result] = op1.result - if op1.opnum != rop.JUMP: # xxx obscure - 
assert op1.descr == op2.descr - if op1.fail_args or op2.fail_args: - assert len(op1.fail_args) == len(op2.fail_args) + if op1.getopnum() != rop.JUMP: # xxx obscure + assert op1.getdescr() == op2.getdescr() + if op1.getfailargs() or op2.getfailargs(): + assert len(op1.getfailargs()) == len(op2.getfailargs()) if strict_fail_args: - for x, y in zip(op1.fail_args, op2.fail_args): + for x, y in zip(op1.getfailargs(), op2.getfailargs()): assert x == remap.get(y, y) else: - fail_args1 = set(op1.fail_args) - fail_args2 = set([remap.get(y, y) for y in op2.fail_args]) + fail_args1 = set(op1.getfailargs()) + fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()]) assert fail_args1 == fail_args2 assert len(oplist1) == len(oplist2) print '-'*57 @@ -209,7 +211,7 @@ self.metainterp_sd = metainterp_sd self.original_greenkey = original_greenkey def store_final_boxes(self, op, boxes): - op.fail_args = boxes + op.setfailargs(boxes) def __eq__(self, other): return type(self) is type(other) # xxx obscure @@ -489,7 +491,7 @@ jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected) - + def test_ooisnull_oononnull_1(self): ops = """ [p0] @@ -842,7 +844,7 @@ jump(f, f1) """ self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected, checkspecnodes=False) + expected, checkspecnodes=False) def test_virtual_2(self): ops = """ @@ -2171,7 +2173,7 @@ jump(i1, i0) """ self.optimize_loop(ops, 'Not, Not', expected) - + def test_fold_partially_constant_ops(self): ops = """ [i0] @@ -2183,7 +2185,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(i0, 0) @@ -2194,7 +2196,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + ops = """ [i0] i1 = int_add(0, i0) @@ -2205,7 +2207,44 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + + def test_fold_partially_constant_ops_ovf(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(i0, 0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + ops = """ + [i0] + i1 = int_add_ovf(0, i0) + guard_no_overflow() [] + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + # ---------- def make_fail_descr(self): @@ -2324,8 +2363,8 @@ from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader from pypy.jit.metainterp.test.test_resume import MyMetaInterp guard_op, = [op for op in self.loop.operations if op.is_guard()] - fail_args = guard_op.fail_args - fdescr = guard_op.descr + fail_args = guard_op.getfailargs() + fdescr = guard_op.getdescr() assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) @@ -3119,7 +3158,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noguard(self): ops = """ [i0] @@ -3134,7 +3173,7 @@ jump(i2) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_noopt(self): ops = """ [i0] @@ -3153,7 +3192,7 @@ jump(4) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_rev(self): ops = """ [i0] @@ -3170,7 +3209,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_tripple(self): ops = """ [i0] @@ -3189,7 +3228,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add(self): ops = """ [i0] @@ -3204,11 +3243,11 @@ [i0] i1 = 
int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_before(self): ops = """ [i0] @@ -3227,7 +3266,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf(self): ops = """ [i0] @@ -3243,11 +3282,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_add(i0, 10) + i2 = int_add(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_add_ovf_before(self): ops = """ [i0] @@ -3268,7 +3307,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub(self): ops = """ [i0] @@ -3283,11 +3322,11 @@ [i0] i1 = int_lt(i0, 4) guard_true(i1) [] - i2 = int_sub(i0, 10) + i2 = int_sub(i0, 10) jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_lt_sub_before(self): ops = """ [i0] @@ -3306,7 +3345,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_ltle(self): ops = """ [i0] @@ -3357,7 +3396,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gtge(self): ops = """ [i0] @@ -3374,7 +3413,7 @@ jump(i0) """ self.optimize_loop(ops, 'Not', expected) - + def test_bound_gegt(self): ops = """ [i0] @@ -3414,6 +3453,42 @@ """ self.optimize_loop(ops, 'Not', expected) + def test_bound_arraylen(self): + ops = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + i2 = int_gt(i1, -1) + guard_true(i2) [] + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + # The dead arraylen_gc will be eliminated by the backend. + expected = """ + [i0, p0] + p1 = new_array(i0, descr=arraydescr) + i1 = arraylen_gc(p1) + setarrayitem_gc(p0, 0, p1) + jump(i0, p0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_bound_strlen(self): + ops = """ + [p0] + i0 = strlen(p0) + i1 = int_ge(i0, 0) + guard_true(i1) [] + jump(p0) + """ + # The dead strlen will be eliminated be the backend. 
+ expected = """ + [p0] + i0 = strlen(p0) + jump(p0) + """ + self.optimize_loop(ops, 'Not', expected) + def test_addsub_const(self): ops = """ [i0] @@ -3558,7 +3633,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ expected = """ @@ -3571,7 +3646,7 @@ i14 = int_gt(i1, 10) guard_true(i14) [] i15 = int_ge(i1, 20) - guard_true(i15) [] + guard_true(i15) [] jump(i1) """ self.optimize_loop(ops, 'Not', expected) @@ -3838,6 +3913,7 @@ self.optimize_loop(ops, 'Not, Not, Not', expected) + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): ## def test_instanceof(self): @@ -3852,7 +3928,7 @@ ## jump(1) ## """ ## self.optimize_loop(ops, 'Not', expected) - + ## def test_instanceof_guard_class(self): ## ops = """ ## [i0, p0] Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py Thu Sep 23 16:53:32 2010 @@ -319,8 +319,8 @@ for loop in get_stats().loops: assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode for op in loop.operations: - if op.is_guard() and hasattr(op.descr, '_debug_suboperations'): - assert len(op.descr._debug_suboperations) <= length + 5 + if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'): + assert len(op.getdescr()._debug_suboperations) <= length + 5 def test_inline_trace_limit(self): myjitdriver = JitDriver(greens=[], reds=['n']) Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py Thu Sep 23 16:53:32 2010 @@ -71,11 +71,11 @@ # ops = self.metainterp.staticdata.stats.loops[0].operations [guard_op] = [op for op in ops - if op.opnum == rop.GUARD_NOT_FORCED] - bxs1 = [box for box in guard_op.fail_args + if op.getopnum() == rop.GUARD_NOT_FORCED] + bxs1 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('.X')] assert len(bxs1) == 1 - bxs2 = [box for box in guard_op.fail_args + bxs2 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('JitVirtualRef')] assert len(bxs2) == 1 JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF @@ -84,11 +84,11 @@ # try reloading from blackhole.py's point of view from pypy.jit.metainterp.resume import ResumeDataDirectReader cpu = self.metainterp.cpu - cpu.get_latest_value_count = lambda : len(guard_op.fail_args) - cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint() - cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base() + cpu.get_latest_value_count = lambda : len(guard_op.getfailargs()) + cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint() + cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base() cpu.clear_latest_values = lambda count: None - resumereader = ResumeDataDirectReader(cpu, guard_op.descr) + resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr()) vrefinfo = self.metainterp.staticdata.virtualref_info lst = [] vrefinfo.continue_tracing = lambda vref, virtual: \ @@ -100,7 +100,7 @@ lst[0][0]) # assert correct type # # try reloading from pyjitpl's point of view - self.metainterp.rebuild_state_after_failure(guard_op.descr) + 
self.metainterp.rebuild_state_after_failure(guard_op.getdescr()) assert len(self.metainterp.framestack) == 1 assert len(self.metainterp.virtualref_boxes) == 2 assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value Modified: pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py Thu Sep 23 16:53:32 2010 @@ -39,16 +39,24 @@ try: from array import array + + def coords(w,h): + y = 0 + while y < h: + x = 0 + while x < w: + yield x,y + x += 1 + y += 1 + def f(img): - i=0 sa=0 - while i < img.__len__(): - sa+=img[i] - i+=1 + for x, y in coords(4,4): + sa += x * y return sa - img=array('h',(1,2,3,4)) - print f(img) + #img=array('h',(1,2,3,4)) + print f(3) except Exception, e: print "Exception: ", type(e) print e Modified: pypy/branch/jitffi/pypy/jit/tool/showstats.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/tool/showstats.py (original) +++ pypy/branch/jitffi/pypy/jit/tool/showstats.py Thu Sep 23 16:53:32 2010 @@ -17,7 +17,7 @@ num_dmp = 0 num_guards = 0 for op in loop.operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: num_dmp += 1 else: num_ops += 1 Modified: pypy/branch/jitffi/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/jitffi/pypy/jit/tool/traceviewer.py Thu Sep 23 16:53:32 2010 @@ -253,9 +253,10 @@ def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [re.split(r' +', line, 1) for line in countname.readlines()] - counts = Counts([(k.strip("\n"), int(v.strip('\n'))) - for v, k in counts]) + counts = [re.split('( 20 and use_threshold: counts.threshold = l[-20] Modified: pypy/branch/jitffi/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/branch/jitffi/pypy/module/__builtin__/functional.py (original) +++ pypy/branch/jitffi/pypy/module/__builtin__/functional.py Thu Sep 23 16:53:32 2010 @@ -13,6 +13,7 @@ from pypy.rlib.objectmodel import specialize from pypy.module.__builtin__.app_functional import range as app_range from inspect import getsource, getfile +from pypy.rlib.jit import unroll_safe """ Implementation of the common integer case of range. 
Instead of handling @@ -96,12 +97,32 @@ return W_RangeListObject(start, step, howmany) + at unroll_safe @specialize.arg(2) def min_max(space, args, implementation_of): if implementation_of == "max": compare = space.gt else: compare = space.lt + + args_w = args.arguments_w + if len(args_w) == 2 and not args.keywords: + # Unrollable case + w_max_item = None + for w_item in args_w: + if w_max_item is None or \ + space.is_true(compare(w_item, w_max_item)): + w_max_item = w_item + return w_max_item + else: + return min_max_loop(space, args, implementation_of) + + at specialize.arg(2) +def min_max_loop(space, args, implementation_of): + if implementation_of == "max": + compare = space.gt + else: + compare = space.lt args_w = args.arguments_w if len(args_w) > 1: w_sequence = space.newtuple(args_w) Modified: pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py ============================================================================== --- pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py (original) +++ pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py Thu Sep 23 16:53:32 2010 @@ -51,3 +51,37 @@ def test_max_empty(self): raises(ValueError, max, []) + +class AppTestMaxTuple: + + def test_max_usual(self): + assert max((1, 2, 3)) == 3 + + def test_max_floats(self): + assert max((0.1, 2.7, 14.7)) == 14.7 + + def test_max_chars(self): + assert max(('a', 'b', 'c')) == 'c' + + def test_max_strings(self): + assert max(('aaa', 'bbb', 'c')) == 'c' + + def test_max_mixed(self): + assert max(('1', 2, 3, 'aa')) == 'aa' + +class AppTestMinList: + + def test_min_usual(self): + assert min([1, 2, 3]) == 1 + + def test_min_floats(self): + assert min([0.1, 2.7, 14.7]) == 0.1 + + def test_min_chars(self): + assert min(['a', 'b', 'c']) == 'a' + + def test_min_strings(self): + assert min(['aaa', 'bbb', 'c']) == 'aaa' + + def test_min_mixed(self): + assert min(['1', 2, 3, 'aa']) == 2 Modified: pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py ============================================================================== --- pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py (original) +++ pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py Thu Sep 23 16:53:32 2010 @@ -60,8 +60,8 @@ cls.space = space def setup_method(self, method): - # https://connect.sigen-ca.si/index-en.html - ADDR = "connect.sigen-ca.si", 443 + # https://codespeak.net/ + ADDR = "codespeak.net", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket Modified: pypy/branch/jitffi/pypy/module/array/interp_array.py ============================================================================== --- pypy/branch/jitffi/pypy/module/array/interp_array.py (original) +++ pypy/branch/jitffi/pypy/module/array/interp_array.py Thu Sep 23 16:53:32 2010 @@ -528,12 +528,15 @@ def array_tostring__Array(space, self): cbuf = self.charbuf() - s = '' - i = 0 - while i < self.len * mytype.bytes: - s += cbuf[i] - i += 1 + s = ''.join([cbuf[i] for i in xrange(self.len * mytype.bytes)]) return self.space.wrap(s) +## +## s = '' +## i = 0 +## while i < self.len * mytype.bytes: +## s += cbuf[i] +## i += 1 +## return self.space.wrap(s) def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n): if not isinstance(w_f, W_File): Modified: pypy/branch/jitffi/pypy/module/gc/__init__.py ============================================================================== --- pypy/branch/jitffi/pypy/module/gc/__init__.py (original) +++ pypy/branch/jitffi/pypy/module/gc/__init__.py Thu Sep 23 16:53:32 2010 @@ -10,13 +10,25 @@ 
'collect': 'interp_gc.collect', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'estimate_heap_size': 'interp_gc.estimate_heap_size', 'garbage' : 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } def __init__(self, space, w_name): - ts = space.config.translation.type_system - if ts == 'ootype': - del self.interpleveldefs['dump_heap_stats'] + if (not space.config.translating or + space.config.translation.gctransformer == "framework"): + self.appleveldefs.update({ + 'dump_rpy_heap': 'app_referents.dump_rpy_heap', + }) + self.interpleveldefs.update({ + 'get_rpy_roots': 'referents.get_rpy_roots', + 'get_rpy_referents': 'referents.get_rpy_referents', + 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage', + 'get_rpy_type_index': 'referents.get_rpy_type_index', + 'get_objects': 'referents.get_objects', + 'get_referents': 'referents.get_referents', + 'get_referrers': 'referents.get_referrers', + '_dump_rpy_heap': 'referents._dump_rpy_heap', + 'GcRef': 'referents.W_GcRef', + }) MixedModule.__init__(self, space, w_name) Modified: pypy/branch/jitffi/pypy/module/gc/interp_gc.py ============================================================================== --- pypy/branch/jitffi/pypy/module/gc/interp_gc.py (original) +++ pypy/branch/jitffi/pypy/module/gc/interp_gc.py Thu Sep 23 16:53:32 2010 @@ -24,36 +24,6 @@ # ____________________________________________________________ -import sys -platform = sys.platform - -def estimate_heap_size(space): - # XXX should be done with the help of the GCs - if platform == "linux2": - import os - pid = os.getpid() - try: - fd = os.open("/proc/" + str(pid) + "/status", os.O_RDONLY, 0777) - except OSError: - pass - else: - try: - content = os.read(fd, 1000000) - finally: - os.close(fd) - lines = content.split("\n") - for line in lines: - if line.startswith("VmSize:"): - start = line.find(" ") # try to ignore tabs - assert start > 0 - stop = len(line) - 3 - assert stop > 0 - result = int(line[start:stop].strip(" ")) * 1024 - return space.wrap(result) - raise OperationError(space.w_RuntimeError, - space.wrap("can't estimate the heap size")) -estimate_heap_size.unwrap_spec = [ObjSpace] - def dump_heap_stats(space, filename): tb = rgc._heap_stats() if not tb: Modified: pypy/branch/jitffi/pypy/module/gc/test/test_gc.py ============================================================================== --- pypy/branch/jitffi/pypy/module/gc/test/test_gc.py (original) +++ pypy/branch/jitffi/pypy/module/gc/test/test_gc.py Thu Sep 23 16:53:32 2010 @@ -59,13 +59,6 @@ raises(ValueError, gc.enable_finalizers) runtest(True) - def test_estimate_heap_size(self): - import sys, gc - if sys.platform == "linux2": - assert gc.estimate_heap_size() > 1024 - else: - raises(RuntimeError, gc.estimate_heap_size) - def test_enable(self): import gc assert gc.isenabled() Modified: pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 23 16:53:32 2010 @@ -762,6 +762,8 @@ else: n = 215 + print + print 'Test:', e1, e2, n, res self.run_source(''' class tst: pass @@ -779,6 +781,25 @@ return sa '''%(e1, e2), n, ([], res)) + def test_boolrewrite_ptr_single(self): + self.run_source(''' + class tst: + pass + def main(): + a = tst() + b = tst() + c = tst() + sa = 0 + for i in range(1000): + if a == b: sa 
+= 1 + else: sa += 2 + if a != b: sa += 10000 + else: sa += 20000 + if i > 750: a = b + return sa + ''', 215, ([], 12481752)) + assert False + def test_array_sum(self): for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): res = 19352859 @@ -1059,7 +1080,38 @@ ''', 170, ([], 1239690.0)) - + def test_min_max(self): + self.run_source(''' + def main(): + i=0 + sa=0 + while i < 2000: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + ''', 51, ([], 2000*3000)) + + def test_silly_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(*range(i)) + i+=1 + return sa + ''', 125, ([], 1997001)) + + def test_iter_max(self): + self.run_source(''' + def main(): + i=2 + sa=0 + while i < 2000: + sa+=max(range(i)) + i+=1 + return sa + ''', 88, ([], 1997001)) # test_circular Modified: pypy/branch/jitffi/pypy/module/select/interp_select.py ============================================================================== --- pypy/branch/jitffi/pypy/module/select/interp_select.py (original) +++ pypy/branch/jitffi/pypy/module/select/interp_select.py Thu Sep 23 16:53:32 2010 @@ -54,14 +54,11 @@ if space.is_w(w_timeout, space.w_None): timeout = -1 else: - # rationale for computing directly integer, instead - # of float + math.cell is that - # we have for free overflow check and noone really - # cares (since CPython does not try too hard to have - # a ceiling of value) + # we want to be compatible with cpython and also accept things + # that can be casted to integer (I think) try: # compute the integer - timeout = space.int_w(w_timeout) + timeout = space.int_w(space.int(w_timeout)) except (OverflowError, ValueError): raise OperationError(space.w_ValueError, space.wrap("math range error")) Modified: pypy/branch/jitffi/pypy/module/select/test/test_select.py ============================================================================== --- pypy/branch/jitffi/pypy/module/select/test/test_select.py (original) +++ pypy/branch/jitffi/pypy/module/select/test/test_select.py Thu Sep 23 16:53:32 2010 @@ -210,6 +210,14 @@ assert len(res[2]) == 0 assert res[0][0] == res[1][0] + def test_poll(self): + import select + class A(object): + def __int__(self): + return 3 + + select.poll().poll(A()) # assert did not crash + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" def setup_class(cls): @@ -275,4 +283,3 @@ s1, addr2 = cls.sock.accept() return s1, s2 - Modified: pypy/branch/jitffi/pypy/module/sys/version.py ============================================================================== --- pypy/branch/jitffi/pypy/module/sys/version.py (original) +++ pypy/branch/jitffi/pypy/module/sys/version.py Thu Sep 23 16:53:32 2010 @@ -4,10 +4,11 @@ import os -CPYTHON_VERSION = (2, 5, 2, "beta", 42) -CPYTHON_API_VERSION = 1012 +#XXX # the release serial 42 is not in range(16) +CPYTHON_VERSION = (2, 5, 2, "beta", 42) #XXX # sync patchlevel.h +CPYTHON_API_VERSION = 1012 #XXX # sync with include/modsupport.h -PYPY_VERSION = (1, 3, 0, "beta", '?') +PYPY_VERSION = (1, 3, 0, "beta", '?') #XXX # sync patchlevel.h # the last item is replaced by the svn revision ^^^ TRIM_URL_UP_TO = 'svn/pypy/' Modified: pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py Thu Sep 23 16:53:32 2010 @@ -32,11 +32,13 @@ 'arpa/inet.h', 'stdint.h', 'errno.h', - 'netpacket/packet.h', - 'sys/ioctl.h', - 
'net/if.h', ) - cond_includes = [('AF_NETLINK', 'linux/netlink.h')] + + cond_includes = [('AF_NETLINK', 'linux/netlink.h'), + ('AF_PACKET', 'netpacket/packet.h'), + ('AF_PACKET', 'sys/ioctl.h'), + ('AF_PACKET', 'net/if.h')] + libraries = () calling_conv = 'c' HEADER = ''.join(['#include <%s>\n' % filename for filename in includes]) Modified: pypy/branch/jitffi/pypy/rlib/rarithmetic.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/rarithmetic.py (original) +++ pypy/branch/jitffi/pypy/rlib/rarithmetic.py Thu Sep 23 16:53:32 2010 @@ -50,6 +50,11 @@ LONG_MASK = _Ltest*2-1 LONG_TEST = _Ltest +LONG_BIT_SHIFT = 0 +while (1 << LONG_BIT_SHIFT) != LONG_BIT: + LONG_BIT_SHIFT += 1 + assert LONG_BIT_SHIFT < 99, "LONG_BIT_SHIFT value not found?" + INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY Modified: pypy/branch/jitffi/pypy/rlib/rgc.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/rgc.py (original) +++ pypy/branch/jitffi/pypy/rlib/rgc.py Thu Sep 23 16:53:32 2010 @@ -1,6 +1,7 @@ -import gc +import gc, types from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rlib.objectmodel import we_are_translated +from pypy.rpython.lltypesystem import lltype, llmemory # ____________________________________________________________ # General GC features @@ -93,7 +94,7 @@ def specialize_call(self, hop): from pypy.rpython.error import TyperError - from pypy.rpython.lltypesystem import lltype, llmemory, rtuple + from pypy.rpython.lltypesystem import rtuple from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR @@ -150,7 +151,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() args_v = [] if len(hop.args_s) == 1: @@ -165,7 +165,6 @@ return annmodel.s_None def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype [v_nbytes] = hop.inputargs(lltype.Signed) hop.exception_cannot_occur() return hop.genop('gc_set_max_heap_size', [v_nbytes], @@ -182,7 +181,6 @@ return annmodel.SomeBool() def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype hop.exception_cannot_occur() return hop.genop('gc_can_move', hop.args_v, resulttype=hop.r_result) @@ -195,11 +193,9 @@ def compute_result_annotation(self): from pypy.annotation import model as annmodel from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.lltypesystem import lltype return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): - from pypy.rpython.lltypesystem import lltype from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP hop.exception_is_here() return hop.genop('gc_heap_stats', [], resulttype=hop.r_result) @@ -209,7 +205,6 @@ When running directly, will pretend that gc is always moving (might be configurable in a future) """ - from pypy.rpython.lltypesystem import lltype return lltype.nullptr(TP) class MallocNonMovingEntry(ExtRegistryEntry): @@ -221,7 +216,6 @@ return malloc(s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): - from pypy.rpython.lltypesystem import lltype # XXX assume flavor and zero to be None by now assert hop.args_s[0].is_constant() vlist = [hop.inputarg(lltype.Void, arg=0)] @@ -243,7 +237,6 @@ def ll_arraycopy(source, dest, source_start, dest_start, length): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import 
lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here # supports non-overlapping copies only @@ -279,7 +272,6 @@ def ll_shrink_array(p, smallerlength): from pypy.rpython.lltypesystem.lloperation import llop - from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import keepalive_until_here if llop.shrink_array(lltype.Bool, p, smallerlength): @@ -313,3 +305,221 @@ func._dont_inline_ = True func._gc_no_collect_ = True return func + +# ____________________________________________________________ + +def get_rpy_roots(): + "NOT_RPYTHON" + # Return the 'roots' from the GC. + # This stub is not usable on top of CPython. + # The gc typically returns a list that ends with a few NULL_GCREFs. + raise NotImplementedError + +def get_rpy_referents(gcref): + "NOT_RPYTHON" + x = gcref._x + if isinstance(x, list): + d = x + elif isinstance(x, dict): + d = x.keys() + x.values() + else: + d = [] + if hasattr(x, '__dict__'): + d = x.__dict__.values() + if hasattr(type(x), '__slots__'): + for slot in type(x).__slots__: + try: + d.append(getattr(x, slot)) + except AttributeError: + pass + # discard objects that are too random or that are _freeze_=True + return [_GcRef(x) for x in d if _keep_object(x)] + +def _keep_object(x): + if isinstance(x, type) or type(x) is types.ClassType: + return False # don't keep any type + if isinstance(x, (list, dict, str)): + return True # keep lists and dicts and strings + try: + return not x._freeze_() # don't keep any frozen object + except AttributeError: + return type(x).__module__ != '__builtin__' # keep non-builtins + except Exception: + return False # don't keep objects whose _freeze_() method explodes + +def get_rpy_memory_usage(gcref): + "NOT_RPYTHON" + # approximate implementation using CPython's type info + Class = type(gcref._x) + size = Class.__basicsize__ + if Class.__itemsize__ > 0: + size += Class.__itemsize__ * len(gcref._x) + return size + +def get_rpy_type_index(gcref): + "NOT_RPYTHON" + from pypy.rlib.rarithmetic import intmask + Class = gcref._x.__class__ + return intmask(id(Class)) + +def cast_gcref_to_int(gcref): + if we_are_translated(): + return lltype.cast_ptr_to_int(gcref) + else: + return id(gcref._x) + +def dump_rpy_heap(fd): + "NOT_RPYTHON" + raise NotImplementedError + +NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) + +class _GcRef(object): + # implementation-specific: there should not be any after translation + __slots__ = ['_x'] + def __init__(self, x): + self._x = x + def __hash__(self): + return object.__hash__(self._x) + def __eq__(self, other): + if isinstance(other, lltype._ptr): + assert other == NULL_GCREF, ( + "comparing a _GcRef with a non-NULL lltype ptr") + return False + assert isinstance(other, _GcRef) + return self._x is other._x + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): + return "_GcRef(%r)" % (self._x, ) + def _freeze_(self): + raise Exception("instances of rlib.rgc._GcRef cannot be translated") + +def cast_instance_to_gcref(x): + # Before translation, casts an RPython instance into a _GcRef. + # After translation, it is a variant of cast_object_to_ptr(GCREF). + if we_are_translated(): + from pypy.rpython import annlowlevel + x = annlowlevel.cast_instance_to_base_ptr(x) + return lltype.cast_opaque_ptr(llmemory.GCREF, x) + else: + return _GcRef(x) +cast_instance_to_gcref._annspecialcase_ = 'specialize:argtype(0)' + +def try_cast_gcref_to_instance(Class, gcref): + # Before translation, unwraps the RPython instance contained in a _GcRef. 
+ # After translation, it is a type-check performed by the GC. + if we_are_translated(): + from pypy.rpython.annlowlevel import base_ptr_lltype + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.lltypesystem import rclass + if _is_rpy_instance(gcref): + objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref) + if objptr.typeptr: # may be NULL, e.g. in rdict's dummykeyobj + clsptr = _get_llcls_from_cls(Class) + if rclass.ll_isinstance(objptr, clsptr): + return cast_base_ptr_to_instance(Class, objptr) + return None + else: + if isinstance(gcref._x, Class): + return gcref._x + return None +try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' + +# ------------------- implementation ------------------- + +_cache_s_list_of_gcrefs = None + +def s_list_of_gcrefs(): + global _cache_s_list_of_gcrefs + if _cache_s_list_of_gcrefs is None: + from pypy.annotation import model as annmodel + from pypy.annotation.listdef import ListDef + s_gcref = annmodel.SomePtr(llmemory.GCREF) + _cache_s_list_of_gcrefs = annmodel.SomeList( + ListDef(None, s_gcref, mutated=True, resized=False)) + return _cache_s_list_of_gcrefs + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_roots + def compute_result_annotation(self): + return s_list_of_gcrefs() + def specialize_call(self, hop): + return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + return s_list_of_gcrefs() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_referents', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_memory_usage + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_memory_usage', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = get_rpy_type_index + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeInteger() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_get_rpy_type_index', vlist, + resulttype = hop.r_result) + +def _is_rpy_instance(gcref): + "NOT_RPYTHON" + raise NotImplementedError + +def _get_llcls_from_cls(Class): + "NOT_RPYTHON" + raise NotImplementedError + +class Entry(ExtRegistryEntry): + _about_ = _is_rpy_instance + def compute_result_annotation(self, s_gcref): + from pypy.annotation import model as annmodel + return annmodel.SomeBool() + def specialize_call(self, hop): + vlist = hop.inputargs(hop.args_r[0]) + return hop.genop('gc_is_rpy_instance', vlist, + resulttype = hop.r_result) + +class Entry(ExtRegistryEntry): + _about_ = _get_llcls_from_cls + def compute_result_annotation(self, s_Class): + from pypy.annotation import model as annmodel + from pypy.rpython.lltypesystem import rclass + assert s_Class.is_constant() + return annmodel.SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): + from pypy.rpython.rclass import getclassrepr + from pypy.objspace.flow.model import Constant + from pypy.rpython.lltypesystem import rclass + Class = hop.args_s[0].const + classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class) + classrepr 
= getclassrepr(hop.rtyper, classdef) + vtable = classrepr.getvtable() + assert lltype.typeOf(vtable) == rclass.CLASSTYPE + return Constant(vtable, concretetype=rclass.CLASSTYPE) + +class Entry(ExtRegistryEntry): + _about_ = dump_rpy_heap + def compute_result_annotation(self, s_fd): + from pypy.annotation.model import s_Bool + return s_Bool + def specialize_call(self, hop): + vlist = hop.inputargs(lltype.Signed) + hop.exception_is_here() + return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result) Modified: pypy/branch/jitffi/pypy/rlib/rstring.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/rstring.py (original) +++ pypy/branch/jitffi/pypy/rlib/rstring.py Thu Sep 23 16:53:32 2010 @@ -46,7 +46,9 @@ # -------------- public API --------------------------------- -INIT_SIZE = 100 # XXX tweak +# the following number is the maximum size of an RPython unicode +# string that goes into the nursery of the minimark GC. +INIT_SIZE = 56 class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/branch/jitffi/pypy/rlib/rwin32.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/rwin32.py (original) +++ pypy/branch/jitffi/pypy/rlib/rwin32.py Thu Sep 23 16:53:32 2010 @@ -82,6 +82,8 @@ if WIN32: HANDLE = rffi.COpaquePtr(typedef='HANDLE') + assert rffi.cast(HANDLE, -1) == rffi.cast(HANDLE, -1) + LPHANDLE = rffi.CArrayPtr(HANDLE) HMODULE = HANDLE NULL_HANDLE = rffi.cast(HANDLE, 0) Modified: pypy/branch/jitffi/pypy/rlib/test/test_rgc.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/test/test_rgc.py (original) +++ pypy/branch/jitffi/pypy/rlib/test/test_rgc.py Thu Sep 23 16:53:32 2010 @@ -16,7 +16,7 @@ assert len(op.args) == 0 res = interpret(f, []) - + assert res is None def test_collect_0(): @@ -31,13 +31,13 @@ assert len(ops) == 1 op = ops[0][1] assert op.opname == 'gc__collect' - assert len(op.args) == 1 + assert len(op.args) == 1 assert op.args[0].value == 0 res = interpret(f, []) - - assert res is None - + + assert res is None + def test_can_move(): T0 = lltype.GcStruct('T') T1 = lltype.GcArray(lltype.Float) @@ -53,9 +53,9 @@ assert len(res) == 2 res = interpret(f, [1]) - + assert res == True - + def test_ll_arraycopy_1(): TYPE = lltype.GcArray(lltype.Signed) a1 = lltype.malloc(TYPE, 10) @@ -153,3 +153,21 @@ assert len(s2.vars) == 3 for i in range(3): assert s2.vars[i] == 50 + i + +def test_get_referents(): + class X(object): + __slots__ = ['stuff'] + x1 = X() + x1.stuff = X() + x2 = X() + lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1)) + lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] + assert x1.stuff in lst2 + assert x2 not in lst2 + +def test_get_memory_usage(): + class X(object): + pass + x1 = X() + n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) + assert n >= 8 and n <= 64 Modified: pypy/branch/jitffi/pypy/rpython/llinterp.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/llinterp.py (original) +++ pypy/branch/jitffi/pypy/rpython/llinterp.py Thu Sep 23 16:53:32 2010 @@ -650,7 +650,7 @@ offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] inneraddr, FIELD = self.getinneraddr(obj, *offsets) if FIELD is not lltype.Void: - self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue) + self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets) def 
op_bare_setinteriorfield(self, obj, *fieldnamesval): offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1] @@ -916,6 +916,24 @@ def op_gc_get_type_info_group(self): raise NotImplementedError("gc_get_type_info_group") + def op_gc_get_rpy_memory_usage(self): + raise NotImplementedError("gc_get_rpy_memory_usage") + + def op_gc_get_rpy_roots(self): + raise NotImplementedError("gc_get_rpy_roots") + + def op_gc_get_rpy_referents(self): + raise NotImplementedError("gc_get_rpy_referents") + + def op_gc_is_rpy_instance(self): + raise NotImplementedError("gc_is_rpy_instance") + + def op_gc_get_rpy_type_index(self): + raise NotImplementedError("gc_get_rpy_type_index") + + def op_gc_dump_rpy_heap(self): + raise NotImplementedError("gc_dump_rpy_heap") + def op_do_malloc_fixedsize_clear(self): raise NotImplementedError("do_malloc_fixedsize_clear") @@ -925,6 +943,9 @@ def op_get_write_barrier_failing_case(self): raise NotImplementedError("get_write_barrier_failing_case") + def op_get_write_barrier_from_array_failing_case(self): + raise NotImplementedError("get_write_barrier_from_array_failing_case") + def op_yield_current_frame_to_caller(self): raise NotImplementedError("yield_current_frame_to_caller") Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py Thu Sep 23 16:53:32 2010 @@ -26,9 +26,6 @@ from pypy.translator.platform import platform from array import array -def uaddressof(obj): - return fixid(ctypes.addressof(obj)) - _ctypes_cache = {} _eci_cache = {} @@ -251,7 +248,7 @@ else: n = None cstruct = cls._malloc(n) - add_storage(container, _struct_mixin, cstruct) + add_storage(container, _struct_mixin, ctypes.pointer(cstruct)) for field_name in STRUCT._names: FIELDTYPE = getattr(STRUCT, field_name) field_value = getattr(container, field_name) @@ -264,8 +261,6 @@ if isinstance(FIELDTYPE, lltype.Struct): csubstruct = getattr(cstruct, field_name) convert_struct(field_value, csubstruct) - subcontainer = getattr(container, field_name) - substorage = subcontainer._storage elif field_name == STRUCT._arrayfld: # inlined var-sized part csubarray = getattr(cstruct, field_name) convert_array(field_value, csubarray) @@ -292,7 +287,7 @@ # regular case: allocate a new ctypes array of the proper type cls = get_ctypes_type(ARRAY) carray = cls._malloc(container.getlength()) - add_storage(container, _array_mixin, carray) + add_storage(container, _array_mixin, ctypes.pointer(carray)) if not isinstance(ARRAY.OF, lltype.ContainerType): # fish that we have enough space ctypes_array = ctypes.cast(carray.items, @@ -321,13 +316,15 @@ if isinstance(FIELDTYPE, lltype.ContainerType): if isinstance(FIELDTYPE, lltype.Struct): struct_container = getattr(container, field_name) - struct_storage = getattr(ctypes_storage, field_name) + struct_storage = ctypes.pointer( + getattr(ctypes_storage.contents, field_name)) struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): assert FIELDTYPE._hints.get('nolength', False) == False arraycontainer = _array_of_known_length(FIELDTYPE) - arraycontainer._storage = getattr(ctypes_storage, field_name) + arraycontainer._storage = ctypes.pointer( + getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) 
object.__setattr__(container, field_name, arraycontainer) else: @@ -352,6 +349,8 @@ def add_storage(instance, mixin_cls, ctypes_storage): """Put ctypes_storage on the instance, changing its __class__ so that it sees the methods of the given mixin class.""" + # _storage is a ctypes pointer to a structure + # except for Opaque objects which use a c_void_p. assert not isinstance(instance, _parentable_mixin) # not yet subcls = get_common_subclass(mixin_cls, instance.__class__) instance.__class__ = subcls @@ -365,17 +364,23 @@ __slots__ = () def _ctypes_storage_was_allocated(self): - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value if addr in ALLOCATED: raise Exception("internal ll2ctypes error - " "double conversion from lltype to ctypes?") # XXX don't store here immortal structures ALLOCATED[addr] = self + def _addressof_storage(self): + "Returns the storage address as an int" + if self._storage is None or self._storage is True: + raise ValueError("Not a ctypes allocated structure") + return ctypes.cast(self._storage, ctypes.c_void_p).value + def _free(self): self._check() # no double-frees # allow the ctypes object to go away now - addr = ctypes.addressof(self._storage) + addr = ctypes.cast(self._storage, ctypes.c_void_p).value try: del ALLOCATED[addr] except KeyError: @@ -393,16 +398,16 @@ raise RuntimeError("pointer comparison with a freed structure") if other._storage is True: return False # the other container is not ctypes-based - addressof_other = ctypes.addressof(other._storage) - # both containers are ctypes-based, compare by address - return (ctypes.addressof(self._storage) == addressof_other) + addressof_other = other._addressof_storage() + # both containers are ctypes-based, compare the addresses + return self._addressof_storage() == addressof_other def __ne__(self, other): return not (self == other) def __hash__(self): if self._storage is not None: - return ctypes.addressof(self._storage) + return self._addressof_storage() else: return object.__hash__(self) @@ -411,7 +416,7 @@ return '' % (self._TYPE,) else: return '' % (self._TYPE, - uaddressof(self._storage),) + fixid(self._addressof_storage())) def __str__(self): return repr(self) @@ -422,7 +427,7 @@ def __getattr__(self, field_name): T = getattr(self._TYPE, field_name) - cobj = getattr(self._storage, field_name) + cobj = getattr(self._storage.contents, field_name) return ctypes2lltype(T, cobj) def __setattr__(self, field_name, value): @@ -430,17 +435,17 @@ object.__setattr__(self, field_name, value) # '_xxx' attributes else: cobj = lltype2ctypes(value) - setattr(self._storage, field_name, cobj) + setattr(self._storage.contents, field_name, cobj) class _array_mixin(_parentable_mixin): """Mixin added to _array containers when they become ctypes-based.""" __slots__ = () def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index) + return self._storage.contents._getitem(index) def setitem(self, index, value): - self._storage._setitem(index, value) + self._storage.contents._setitem(index, value) class _array_of_unknown_length(_parentable_mixin, lltype._parentable): _kind = "array" @@ -451,10 +456,10 @@ return 0, sys.maxint def getitem(self, index, uninitialized_ok=False): - return self._storage._getitem(index, boundscheck=False) + return self._storage.contents._getitem(index, boundscheck=False) def setitem(self, index, value): - self._storage._setitem(index, value, boundscheck=False) + self._storage.contents._setitem(index, value, boundscheck=False) def 
getitems(self): if self._TYPE.OF != lltype.Char: @@ -476,7 +481,7 @@ __slots__ = () def getlength(self): - return self._storage.length + return self._storage.contents.length def getbounds(self): return 0, self.getlength() @@ -653,17 +658,18 @@ container._ctypes_storage_was_allocated() if isinstance(T.TO, lltype.OpaqueType): - return container._storage + return container._storage.value storage = container._storage - p = ctypes.pointer(storage) + p = storage if index: p = ctypes.cast(p, ctypes.c_void_p) p = ctypes.c_void_p(p.value + index) c_tp = get_ctypes_type(T.TO) - storage._normalized_ctype = c_tp - if normalize and hasattr(storage, '_normalized_ctype'): - p = ctypes.cast(p, ctypes.POINTER(storage._normalized_ctype)) + storage.contents._normalized_ctype = c_tp + if normalize and hasattr(storage.contents, '_normalized_ctype'): + normalized_ctype = storage.contents._normalized_ctype + p = ctypes.cast(p, ctypes.POINTER(normalized_ctype)) if lltype.typeOf(llobj) == llmemory.GCREF: p = ctypes.cast(p, ctypes.c_void_p) return p @@ -707,13 +713,13 @@ cobjheader = ctypes.cast(cobj, get_ctypes_type(lltype.Ptr(OBJECT))) struct_use_ctypes_storage(containerheader, - cobjheader.contents) + cobjheader) REAL_TYPE = get_rtyper().get_type_for_typeptr( containerheader.typeptr) REAL_T = lltype.Ptr(REAL_TYPE) cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T)) container = lltype._struct(REAL_TYPE) - struct_use_ctypes_storage(container, cobj.contents) + struct_use_ctypes_storage(container, cobj) if REAL_TYPE != T.TO: p = container._as_ptr() container = lltype.cast_pointer(T, p)._as_obj() @@ -728,10 +734,10 @@ elif isinstance(T.TO, lltype.Array): if T.TO._hints.get('nolength', False): container = _array_of_unknown_length(T.TO) - container._storage = cobj.contents + container._storage = type(cobj)(cobj.contents) else: container = _array_of_known_length(T.TO) - container._storage = cobj.contents + container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, lltype.FuncType): cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: @@ -745,7 +751,8 @@ container = _llgcopaque(cobj) else: container = lltype._opaque(T.TO) - container._storage = ctypes.cast(cobj, ctypes.c_void_p) + cbuf = ctypes.cast(cobj, ctypes.c_void_p) + add_storage(container, _parentable_mixin, cbuf) else: raise NotImplementedError(T) llobj = lltype._ptr(T, container, solid=True) Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py Thu Sep 23 16:53:32 2010 @@ -16,8 +16,11 @@ class Arena(object): object_arena_location = {} # {container: (arena, offset)} old_object_arena_location = weakref.WeakKeyDictionary() + _count_arenas = 0 def __init__(self, nbytes, zero): + Arena._count_arenas += 1 + self._arena_index = Arena._count_arenas self.nbytes = nbytes self.usagemap = array.array('c') self.objectptrs = {} # {offset: ptr-to-container} @@ -25,6 +28,9 @@ self.freed = False self.reset(zero) + def __repr__(self): + return '' % (self._arena_index, self.nbytes) + def reset(self, zero, start=0, size=None): self.check() if size is None: @@ -40,7 +46,7 @@ assert offset >= stop, "object overlaps cleared area" else: obj = ptr._obj - del Arena.object_arena_location[obj] + _dictdel(Arena.object_arena_location, obj) del self.objectptrs[offset] del self.objectsizes[offset] obj._free() @@ -63,7 +69,7 @@ raise 
ArenaError("Address offset is outside the arena") return fakearenaaddress(self, offset) - def allocate_object(self, offset, size): + def allocate_object(self, offset, size, letter='x'): self.check() bytes = llmemory.raw_malloc_usage(size) if offset + bytes > self.nbytes: @@ -78,7 +84,7 @@ raise ArenaError("new object overlaps a previous object") assert offset not in self.objectptrs addr2 = size._raw_malloc([], zero=zero) - pattern = 'X' + 'x'*(bytes-1) + pattern = letter.upper() + letter*(bytes-1) self.usagemap[offset:offset+bytes] = array.array('c', pattern) self.setobject(addr2, offset, bytes) # common case: 'size' starts with a GCHeaderOffset. In this case @@ -252,6 +258,16 @@ raise RuntimeError(msg % (obj,)) return arena.getaddr(offset) +def _dictdel(d, key): + # hack + try: + del d[key] + except KeyError: + items = d.items() + d.clear() + d.update(items) + del d[key] + class RoundedUpForAllocation(llmemory.AddressOffset): """A size that is rounded up in order to preserve alignment of objects following it. For arenas containing heterogenous objects. @@ -297,6 +313,7 @@ assert isinstance(arena_addr, fakearenaaddress) assert arena_addr.offset == 0 arena_addr.arena.reset(False) + assert not arena_addr.arena.objectptrs arena_addr.arena.freed = True def arena_reset(arena_addr, size, zero): @@ -317,10 +334,13 @@ this is used to know what type of lltype object to allocate.""" from pypy.rpython.memory.lltypelayout import memory_alignment addr = getfakearenaaddress(addr) - if check_alignment and (addr.offset & (memory_alignment-1)) != 0: + letter = 'x' + if llmemory.raw_malloc_usage(size) == 1: + letter = 'b' # for Byte-aligned allocations + elif check_alignment and (addr.offset & (memory_alignment-1)) != 0: raise ArenaError("object at offset %d would not be correctly aligned" % (addr.offset,)) - addr.arena.allocate_object(addr.offset, size) + addr.arena.allocate_object(addr.offset, size, letter) def arena_shrink_obj(addr, newsize): """ Mark object as shorter than it was @@ -357,6 +377,11 @@ # This only works with linux's madvise(), which is really not a memory # usage hint but a real command. It guarantees that after MADV_DONTNEED # the pages are cleared again. + + # Note that the trick of the general 'posix' section below, i.e. + # reading /dev/zero, does not seem to have the correct effect of + # lazily-allocating pages on all Linux systems. + from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo _eci = ExternalCompilationInfo(includes=['sys/mman.h']) @@ -459,6 +484,7 @@ sandboxsafe=True) def llimpl_arena_free(arena_addr): + # NB. minimark.py assumes that arena_free() is actually just a raw_free(). 
llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', llimpl=llimpl_arena_free, Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py Thu Sep 23 16:53:32 2010 @@ -8,7 +8,8 @@ from pypy.rlib.rgc import collect from pypy.rlib.rgc import can_move -def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue): +def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=None): assert typeOf(newvalue) == INNERTYPE # xxx access the address object's ref() directly for performance inneraddr.ref()[0] = newvalue Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py Thu Sep 23 16:53:32 2010 @@ -409,6 +409,9 @@ if self.ptr is None: s = 'NULL' else: + #try: + # s = hex(self.ptr._cast_to_int()) + #except: s = str(self.ptr) return '' % (s,) Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py Thu Sep 23 16:53:32 2010 @@ -436,6 +436,7 @@ 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), 'do_malloc_varsize_clear': LLOp(canraise=(MemoryError,),canunwindgc=True), 'get_write_barrier_failing_case': LLOp(sideeffects=False), + 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False), 'gc_get_type_info_group': LLOp(sideeffects=False), # __________ GC operations __________ @@ -467,6 +468,13 @@ 'gc_writebarrier_before_copy': LLOp(canrun=True), 'gc_heap_stats' : LLOp(canunwindgc=True), + 'gc_get_rpy_roots' : LLOp(), + 'gc_get_rpy_referents': LLOp(), + 'gc_get_rpy_memory_usage': LLOp(), + 'gc_get_rpy_type_index': LLOp(), + 'gc_is_rpy_instance' : LLOp(), + 'gc_dump_rpy_heap' : LLOp(), + # ------- JIT & GC interaction, only for some GCs ---------- 'gc_adr_of_nursery_free' : LLOp(), Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Thu Sep 23 16:53:32 2010 @@ -353,6 +353,8 @@ assert tmppath.check(file=1) assert not ALLOCATED # detects memory leaks in the test + assert rffi.cast(FILEP, -1) == rffi.cast(FILEP, -1) + def test_simple_cast(self): assert rffi.cast(rffi.SIGNEDCHAR, 0x123456) == 0x56 assert rffi.cast(rffi.SIGNEDCHAR, 0x123481) == -127 @@ -1250,6 +1252,32 @@ assert i == llmemory.cast_adr_to_int(a, "forced") lltype.free(p, flavor='raw') + def test_freelist(self): + S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed)) + SP = lltype.Ptr(S) + chunk = lltype.malloc(rffi.CArrayPtr(S).TO, 10, flavor='raw') + assert lltype.typeOf(chunk) == rffi.CArrayPtr(S) + free_list = lltype.nullptr(rffi.VOIDP.TO) + # build list + current = chunk + for i in range(10): + rffi.cast(rffi.VOIDPP, current)[0] = free_list + free_list = rffi.cast(rffi.VOIDP, current) + current = 
rffi.ptradd(current, 1) + # get one + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get two + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + # get three + p = free_list + free_list = rffi.cast(rffi.VOIDPP, p)[0] + rffi.cast(SP, p).x = 0 + lltype.free(chunk, flavor='raw') + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gc/base.py Thu Sep 23 16:53:32 2010 @@ -5,6 +5,7 @@ from pypy.rpython.memory.support import get_address_stack, get_address_deque from pypy.rpython.memory.support import AddressDict from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage +from pypy.rlib.rarithmetic import r_uint TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed), ('size', lltype.Signed), @@ -53,7 +54,8 @@ varsize_offset_to_length, varsize_offsets_to_gcpointers_in_var_part, weakpointer_offset, - member_index): + member_index, + is_rpython_class): self.getfinalizer = getfinalizer self.is_varsize = is_varsize self.has_gcptr_in_varsize = has_gcptr_in_varsize @@ -66,6 +68,7 @@ self.varsize_offsets_to_gcpointers_in_var_part = varsize_offsets_to_gcpointers_in_var_part self.weakpointer_offset = weakpointer_offset self.member_index = member_index + self.is_rpython_class = is_rpython_class def get_member_index(self, type_id): return self.member_index(type_id) @@ -101,6 +104,9 @@ def get_size(self, obj): return self._get_size_for_typeid(obj, self.get_type_id(obj)) + def get_size_incl_hash(self, obj): + return self.get_size(obj) + def malloc(self, typeid, length=0, zero=False): """For testing. The interface used by the gctransformer is the four malloc_[fixed,var]size[_clear]() functions. @@ -146,7 +152,7 @@ return False def set_max_heap_size(self, size): - pass + raise NotImplementedError def x_swap_pool(self, newpool): return newpool @@ -194,6 +200,39 @@ length -= 1 trace._annspecialcase_ = 'specialize:arg(2)' + def trace_partial(self, obj, start, stop, callback, arg): + """Like trace(), but only walk the array part, for indices in + range(start, stop). Must only be called if has_gcptr_in_varsize(). 
+ """ + length = stop - start + typeid = self.get_type_id(obj) + if self.is_gcarrayofgcptr(typeid): + # a performance shortcut for GcArray(gcptr) + item = obj + llmemory.gcarrayofptr_itemsoffset + item += llmemory.gcarrayofptr_singleitemoffset * start + while length > 0: + if self.points_to_valid_gc_object(item): + callback(item, arg) + item += llmemory.gcarrayofptr_singleitemoffset + length -= 1 + return + ll_assert(self.has_gcptr_in_varsize(typeid), + "trace_partial() on object without has_gcptr_in_varsize()") + item = obj + self.varsize_offset_to_variable_part(typeid) + offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) + itemlength = self.varsize_item_sizes(typeid) + item += itemlength * start + while length > 0: + j = 0 + while j < len(offsets): + itemobj = item + offsets[j] + if self.points_to_valid_gc_object(itemobj): + callback(itemobj, arg) + j += 1 + item += itemlength + length -= 1 + trace_partial._annspecialcase_ = 'specialize:arg(4)' + def points_to_valid_gc_object(self, addr): return self.is_valid_gc_object(addr.address[0]) @@ -340,6 +379,7 @@ "generation": "generation.GenerationGC", "hybrid": "hybrid.HybridGC", "markcompact" : "markcompact.MarkCompactGC", + "minimark" : "minimark.MiniMarkGC", } try: modulename, classname = classes[config.translation.gc].split('.') @@ -351,10 +391,12 @@ GCClass = getattr(module, classname) return GCClass, GCClass.TRANSLATION_PARAMS -def read_from_env(varname): +def _read_float_and_factor_from_env(varname): import os value = os.environ.get(varname) if value: + if len(value) > 1 and value[-1] in 'bB': + value = value[:-1] realvalue = value[:-1] if value[-1] in 'kK': factor = 1024 @@ -366,7 +408,21 @@ factor = 1 realvalue = value try: - return int(float(realvalue) * factor) + return (float(realvalue), factor) except ValueError: pass - return -1 + return (0.0, 0) + +def read_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return int(value * factor) + +def read_uint_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + return r_uint(value * factor) + +def read_float_from_env(varname): + value, factor = _read_float_and_factor_from_env(varname) + if factor != 1: + return 0.0 + return value Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py Thu Sep 23 16:53:32 2010 @@ -449,7 +449,7 @@ # for the JIT: a minimal description of the write_barrier() method # (the JIT assumes it is of the shape - # "if newvalue.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py Thu Sep 23 16:53:32 2010 @@ -674,6 +674,13 @@ return llmemory.cast_adr_to_int(obj) # not in an arena... 
return adr - self.space + def get_size_incl_hash(self, obj): + size = self.get_size(obj) + hdr = self.header(obj) + if hdr.tid & GCFLAG_HASHFIELD: + size += llmemory.sizeof(lltype.Signed) + return size + # ____________________________________________________________ class CannotAllocateGCArena(Exception): Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py Thu Sep 23 16:53:32 2010 @@ -95,7 +95,10 @@ if self.gc.needs_write_barrier: newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + if hasattr(self.gc, 'write_barrier_from_array'): + self.gc.write_barrier_from_array(newaddr, addr_struct, index) + else: + self.gc.write_barrier(newaddr, addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -326,6 +329,27 @@ self.gc.collect() assert hash == self.gc.identityhash(self.stackroots[-1]) self.stackroots.pop() + # (6) ask for the hash of varsized objects, larger and larger + for i in range(10): + self.gc.collect() + p = self.malloc(VAR, i) + self.stackroots.append(p) + hash = self.gc.identityhash(p) + self.gc.collect() + assert hash == self.gc.identityhash(self.stackroots[-1]) + self.stackroots.pop() + + def test_memory_alignment(self): + A1 = lltype.GcArray(lltype.Char) + for i in range(50): + p1 = self.malloc(A1, i) + if i: + p1[i-1] = chr(i) + self.stackroots.append(p1) + self.gc.collect() + for i in range(1, 50): + p = self.stackroots[-50+i] + assert p[i-1] == chr(i) class TestSemiSpaceGC(DirectGCTest): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass @@ -456,3 +480,35 @@ def test_varsized_from_prebuilt_gc(self): DirectGCTest.test_varsized_from_prebuilt_gc(self) test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD} + + +class TestMiniMarkGCSimple(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + from pypy.rpython.memory.gc.minimark import SimpleArenaCollection + # test the GC itself, providing a simple class for ArenaCollection + GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} + + def test_card_marker(self): + for arraylength in (range(4, 17) + + [69] # 3 bytes + + [300]): # 10 bytes + print 'array length:', arraylength + nums = {} + a = self.malloc(VAR, arraylength) + self.stackroots.append(a) + for i in range(50): + p = self.malloc(S) + p.x = -i + a = self.stackroots[-1] + index = (i*i) % arraylength + self.writearray(a, index, p) + nums[index] = p.x + # + for index, expected_x in nums.items(): + assert a[index].x == expected_x + self.stackroots.pop() + test_card_marker.GC_PARAMS = {"card_page_indices": 4, + "card_page_indices_min": 7} + +class TestMiniMarkGCFull(DirectGCTest): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py Thu Sep 23 16:53:32 2010 @@ -7,7 +7,7 @@ from pypy.rpython.memory.gc import marksweep from pypy.rpython.memory.gcheader import GCHeaderBuilder from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib import rstack +from pypy.rlib import 
rstack, rgc from pypy.rlib.debug import ll_assert from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc @@ -139,6 +139,8 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP + from pypy.rpython.memory.gc import inspect + super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): # for tests: the GC choice can be specified as class attributes @@ -180,6 +182,7 @@ gcdata.gc.set_root_walker(root_walker) self.num_pushs = 0 self.write_barrier_calls = 0 + self.write_barrier_from_array_calls = 0 def frameworkgc_setup(): # run-time initialization code @@ -388,11 +391,38 @@ else: self.id_ptr = None + self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + [s_gc], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + [s_gc, s_gcref], + rgc.s_list_of_gcrefs(), + minimal_transform=False) + self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + [s_gc, s_gcref], + annmodel.SomeInteger(), + minimal_transform=False) + self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + [s_gc, s_gcref], + annmodel.SomeBool(), + minimal_transform=False) + self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + [s_gc, annmodel.SomeInteger()], + annmodel.s_Bool, + minimal_transform=False) + self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, [s_gc, annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + self.write_barrier_ptr = None + self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, @@ -408,8 +438,26 @@ [annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None) - else: - self.write_barrier_ptr = None + func = getattr(GCClass, 'write_barrier_from_array', None) + if func is not None: + self.write_barrier_from_array_ptr = getfn(func.im_func, + [s_gc, + annmodel.SomeAddress(), + annmodel.SomeAddress(), + annmodel.SomeInteger()], + annmodel.s_None, + inline=True) + func = getattr(gcdata.gc, 'remember_young_pointer_from_array', + None) + if func is not None: + # func should not be a bound method, but a real function + assert isinstance(func, types.FunctionType) + self.write_barrier_from_array_failing_case_ptr = \ + getfn(func, + [annmodel.SomeAddress(), + annmodel.SomeInteger(), + annmodel.SomeAddress()], + annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], annmodel.SomeInteger()) @@ -496,6 +544,9 @@ if self.write_barrier_ptr: log.info("inserted %s write barrier calls" % ( self.write_barrier_calls, )) + if self.write_barrier_from_array_ptr: + log.info("inserted %s write_barrier_from_array calls" % ( + self.write_barrier_from_array_calls, )) # XXX because we call inputconst already in replace_malloc, we can't # modify the instance, we have to modify the 'rtyped instance' @@ -766,6 +817,12 @@ [self.write_barrier_failing_case_ptr], resultvar=op.result) + def gct_get_write_barrier_from_array_failing_case(self, hop): + op = hop.spaceop + hop.genop("same_as", + [self.write_barrier_from_array_failing_case_ptr], + resultvar=op.result) + def gct_zero_gc_pointers_inside(self, hop): if not self.malloc_zero_filled: v_ob = hop.spaceop.args[0] @@ -883,6 
+940,53 @@ def gct_gc_get_type_info_group(self, hop): return hop.cast_result(self.c_type_info_group) + def gct_gc_get_rpy_roots(self, hop): + livevars = self.push_roots(hop) + hop.genop("direct_call", + [self.get_rpy_roots_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_referents(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_referents_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_memory_usage(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_get_rpy_type_index(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_is_rpy_instance(self, hop): + livevars = self.push_roots(hop) + [v_ptr] = hop.spaceop.args + hop.genop("direct_call", + [self.is_rpy_instance_ptr, self.c_const_gc, v_ptr], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + + def gct_gc_dump_rpy_heap(self, hop): + livevars = self.push_roots(hop) + [v_fd] = hop.spaceop.args + hop.genop("direct_call", + [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd], + resultvar=hop.spaceop.result) + self.pop_roots(hop, livevars) + def gct_malloc_nonmovable_varsize(self, hop): TYPE = hop.spaceop.result.concretetype if self.gcdata.gc.can_malloc_nonmovable(): @@ -897,6 +1001,15 @@ c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO)) return hop.cast_result(c) + def _set_into_gc_array_part(self, op): + if op.opname == 'setarrayitem': + return op.args[1] + if op.opname == 'setinteriorfield': + for v in op.args[1:-1]: + if v.concretetype is not lltype.Void: + return v + return None + def transform_generic_set(self, hop): from pypy.objspace.flow.model import Constant opname = hop.spaceop.opname @@ -910,15 +1023,26 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - self.write_barrier_calls += 1 v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) - hop.genop("direct_call", [self.write_barrier_ptr, - self.c_const_gc, - v_newvalue, - v_structaddr]) + if (self.write_barrier_from_array_ptr is not None and + self._set_into_gc_array_part(hop.spaceop) is not None): + self.write_barrier_from_array_calls += 1 + v_index = self._set_into_gc_array_part(hop.spaceop) + assert v_index.concretetype == lltype.Signed + hop.genop("direct_call", [self.write_barrier_from_array_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr, + v_index]) + else: + self.write_barrier_calls += 1 + hop.genop("direct_call", [self.write_barrier_ptr, + self.c_const_gc, + v_newvalue, + v_structaddr]) hop.rename('bare_' + opname) def transform_getfield_typeptr(self, hop): Modified: pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py Thu Sep 23 16:53:32 2010 @@ -101,6 +101,10 @@ infobits = 
self.get(typeid).infobits return infobits & T_MEMBER_INDEX + def q_is_rpython_class(self, typeid): + infobits = self.get(typeid).infobits + return infobits & T_IS_RPYTHON_INSTANCE != 0 + def set_query_functions(self, gc): gc.set_query_functions( self.q_is_varsize, @@ -114,7 +118,8 @@ self.q_varsize_offset_to_length, self.q_varsize_offsets_to_gcpointers_in_var_part, self.q_weakpointer_offset, - self.q_member_index) + self.q_member_index, + self.q_is_rpython_class) # the lowest 16bits are used to store group member index @@ -123,6 +128,7 @@ T_HAS_GCPTR_IN_VARSIZE = 0x20000 T_IS_GCARRAY_OF_GCPTR = 0x40000 T_IS_WEAKREF = 0x80000 +T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT T_KEY_MASK = intmask(0xFF000000) T_KEY_VALUE = intmask(0x7A000000) # bug detection only @@ -181,6 +187,8 @@ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF + if is_subclass_of_object(TYPE): + infobits |= T_IS_RPYTHON_INSTANCE info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ @@ -259,9 +267,7 @@ else: # no vtable from lltype2vtable -- double-check to be sure # that it's not a subclass of OBJECT. - while isinstance(TYPE, lltype.GcStruct): - assert TYPE is not rclass.OBJECT - _, TYPE = TYPE._first_struct() + assert not is_subclass_of_object(TYPE) def get_info(self, type_id): res = llop.get_group_member(GCData.TYPE_INFO_PTR, @@ -437,6 +443,13 @@ for i in range(p._obj.getlength()): zero_gc_pointers_inside(p[i], ITEM) +def is_subclass_of_object(TYPE): + while isinstance(TYPE, lltype.GcStruct): + if TYPE is rclass.OBJECT: + return True + _, TYPE = TYPE._first_struct() + return False + ########## weakrefs ########## # framework: weakref objects are small structures containing only an address Modified: pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py Thu Sep 23 16:53:32 2010 @@ -15,6 +15,8 @@ self.llinterp = llinterp self.prepare_graphs(flowgraphs) self.gc.setup() + self.has_write_barrier_from_array = hasattr(self.gc, + 'write_barrier_from_array') def prepare_graphs(self, flowgraphs): lltype2vtable = self.llinterp.typer.lltype2vtable @@ -78,13 +80,30 @@ ARRAY = lltype.typeOf(array).TO addr = llmemory.cast_ptr_to_adr(array) addr += llmemory.itemoffsetof(ARRAY, index) - self.setinterior(array, addr, ARRAY.OF, newitem) + self.setinterior(array, addr, ARRAY.OF, newitem, (index,)) - def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue): + def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue, + offsets=()): if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and isinstance(INNERTYPE, lltype.Ptr) and INNERTYPE.TO._gckind == 'gc'): - self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue), - llmemory.cast_ptr_to_adr(toplevelcontainer)) + # + wb = True + if self.has_write_barrier_from_array: + for index in offsets: + if type(index) is not str: + assert (type(index) is int # <- fast path + or lltype.typeOf(index) == lltype.Signed) + self.gc.write_barrier_from_array( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer), + index) + wb = False + break + # + if wb: + self.gc.write_barrier( + llmemory.cast_ptr_to_adr(newvalue), + llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) 
def collect(self, *gen): Modified: pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py Thu Sep 23 16:53:32 2010 @@ -7,7 +7,7 @@ primitive_to_fmt = {lltype.Signed: "l", lltype.Unsigned: "L", lltype.Char: "c", - lltype.UniChar: "H", # maybe + lltype.UniChar: "i", # 4 bytes lltype.Bool: "B", lltype.Float: "d", llmemory.Address: "P", Modified: pypy/branch/jitffi/pypy/rpython/memory/support.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/support.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/support.py Thu Sep 23 16:53:32 2010 @@ -216,6 +216,24 @@ self.index_in_oldest = index + 1 return result + def foreach(self, callback, arg): + """Invoke 'callback(address, arg)' for all addresses in the deque. + Typically, 'callback' is a bound method and 'arg' can be None. + """ + chunk = self.oldest_chunk + index = self.index_in_oldest + while chunk is not self.newest_chunk: + while index < chunk_size: + callback(chunk.items[index], arg) + index += 1 + chunk = chunk.next + index = 0 + limit = self.index_in_newest + while index < limit: + callback(chunk.items[index], arg) + index += 1 + foreach._annspecialcase_ = 'specialize:arg(1)' + def delete(self): cur = self.oldest_chunk while cur: Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py Thu Sep 23 16:53:32 2010 @@ -26,8 +26,9 @@ class GCTest(object): GC_PARAMS = {} GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -451,10 +452,10 @@ a = rgc.malloc_nonmovable(TP, 3) if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_malloc_nonmovable_fixsize(self): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -465,37 +466,36 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 - assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE) def test_shrink_array(self): from pypy.rpython.lltypesystem.rstr import STR - GC_CAN_SHRINK_ARRAY = self.GC_CAN_SHRINK_ARRAY - def f(n, m): + def f(n, m, gc_can_shrink_array): ptr = lltype.malloc(STR, n) ptr.hash = 0x62 ptr.chars[0] = 'A' ptr.chars[1] = 'B' ptr.chars[2] = 'C' ptr2 = rgc.ll_shrink_array(ptr, 2) - assert (ptr == ptr2) == GC_CAN_SHRINK_ARRAY + assert (ptr == ptr2) == gc_can_shrink_array rgc.collect() return ( ord(ptr2.chars[0]) + (ord(ptr2.chars[1]) << 8) + (len(ptr2.chars) << 16) + (ptr2.hash << 24)) - assert self.interpret(f, [3, 0]) == 0x62024241 - # don't test with larger numbers of top of the Hybrid GC, because - # the default settings make it a too-large varsized object that - # gets allocated outside the semispace - if not isinstance(self, TestHybridGC): - assert self.interpret(f, [12, 0]) == 0x62024241 + 
flag = self.GC_CAN_SHRINK_ARRAY + assert self.interpret(f, [3, 0, flag]) == 0x62024241 + # with larger numbers, it gets allocated outside the semispace + # with some GCs. + flag = self.GC_CAN_SHRINK_BIG_ARRAY + assert self.interpret(f, [12, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -568,7 +568,7 @@ assert res == 111 def test_writebarrier_before_copy(self): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -628,8 +628,9 @@ class TestSemiSpaceGC(GCTest, snippet.SemiSpaceGCTests): from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True + GC_CAN_SHRINK_BIG_ARRAY = True class TestGrowingSemiSpaceGC(TestSemiSpaceGC): GC_PARAMS = {'space_size': 16*WORD} @@ -641,16 +642,15 @@ from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass GC_PARAMS = {'space_size': 65536+16384} GC_CAN_SHRINK_ARRAY = False + GC_CAN_SHRINK_BIG_ARRAY = False def test_finalizer_order(self): py.test.skip("Not implemented yet") - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") class TestHybridGC(TestGenerationalGC): from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_BIG_ARRAY = False def test_ref_from_rawmalloced_to_regular(self): import gc @@ -720,7 +720,7 @@ from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass GC_CAN_MOVE = False # with this size of heap, stuff gets allocated # in 3rd gen. 
- GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_PARAMS = {'space_size': 48*WORD, 'min_nursery_size': 12*WORD, 'nursery_size': 12*WORD, @@ -764,3 +764,13 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("Not supported") + + +class TestMiniMarkGC(TestSemiSpaceGC): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_CAN_SHRINK_BIG_ARRAY = False + GC_CAN_MALLOC_NONMOVABLE = True + +class TestMiniMarkGCCardMarking(TestMiniMarkGC): + GC_PARAMS = {'card_page_indices': 4, + 'card_page_indices_min': 10} Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py Thu Sep 23 16:53:32 2010 @@ -113,6 +113,27 @@ deque.append(x) expected.append(x) + def test_foreach(self): + AddressDeque = get_address_deque(10) + ll = AddressDeque() + for num_entries in range(30, -1, -1): + addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) + for i in range(num_entries)] + for a in addrs: + ll.append(a) + + seen = [] + def callback(addr, fortytwo): + assert fortytwo == 42 + seen.append(addr) + + ll.foreach(callback, 42) + assert seen == addrs + for a in addrs: + b = ll.popleft() + assert a == b + assert not ll.non_empty() + def test_stack_annotate(): AddressStack = get_address_stack(60) Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py Thu Sep 23 16:53:32 2010 @@ -47,7 +47,7 @@ gcpolicy = None stacklessgc = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True taggedpointers = False def setup_class(cls): @@ -242,6 +242,26 @@ heap_size = self.heap_usage(statistics) assert heap_size < 16000 * WORD / 4 # xxx + def define_llinterp_dict(self): + class A(object): + pass + def malloc_a_lot(): + i = 0 + while i < 10: + i += 1 + a = (1, 2, i) + b = {a: A()} + j = 0 + while j < 20: + j += 1 + b[1, j, i] = A() + return 0 + return malloc_a_lot + + def test_llinterp_dict(self): + run = self.runner("llinterp_dict") + run([]) + def skipdefine_global_list(cls): gl = [] class Box: @@ -602,8 +622,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 #except Exception, e: # return 2 @@ -611,7 +631,7 @@ def test_malloc_nonmovable(self): run = self.runner("malloc_nonmovable") - assert int(self.GC_CANNOT_MALLOC_NONMOVABLE) == run([]) + assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([]) def define_malloc_nonmovable_fixsize(cls): S = lltype.GcStruct('S', ('x', lltype.Float)) @@ -622,8 +642,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -631,7 +651,7 @@ def test_malloc_nonmovable_fixsize(self): run = self.runner("malloc_nonmovable_fixsize") - assert run([]) == int(self.GC_CANNOT_MALLOC_NONMOVABLE) + assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE) def define_shrink_array(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -680,7 +700,8 @@ class GenericMovingGCTests(GenericGCTests): GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False + GC_CAN_TEST_ID = False def define_many_ids(cls): class 
A(object): @@ -710,7 +731,8 @@ return f def test_many_ids(self): - py.test.skip("fails for bad reasons in lltype.py :-(") + if not self.GC_CAN_TEST_ID: + py.test.skip("fails for bad reasons in lltype.py :-(") run = self.runner("many_ids") run([]) @@ -856,7 +878,7 @@ # (and give fixedsize) def define_writebarrier_before_copy(cls): - S = lltype.GcStruct('S') + S = lltype.GcStruct('S', ('x', lltype.Char)) TP = lltype.GcArray(lltype.Ptr(S)) def fn(): l = lltype.malloc(TP, 100) @@ -1144,10 +1166,6 @@ GC_PARAMS = {'space_size': 4096*WORD} root_stack_depth = 200 - def test_writebarrier_before_copy(self): - py.test.skip("Not relevant, and crashes because llarena does not " - "support empty GcStructs") - class TestGenerationGC(GenericMovingGCTests): gcname = "generation" GC_CAN_SHRINK_ARRAY = True @@ -1379,7 +1397,7 @@ class TestHybridGC(TestGenerationGC): gcname = "hybrid" - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True class gcpolicy(gc.FrameworkGcPolicy): class transformerclass(framework.FrameworkGCTransformer): @@ -1444,6 +1462,23 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("not supported") + +class TestMiniMarkGC(TestHybridGC): + gcname = "minimark" + GC_CAN_TEST_ID = True + + class gcpolicy(gc.FrameworkGcPolicy): + class transformerclass(framework.FrameworkGCTransformer): + from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass + GC_PARAMS = {'nursery_size': 32*WORD, + 'page_size': 16*WORD, + 'arena_size': 64*WORD, + 'small_request_threshold': 5*WORD, + 'card_page_indices': 4, + 'card_page_indices_min': 10, + } + root_stack_depth = 200 + # ________________________________________________________________ # tagged pointers Modified: pypy/branch/jitffi/pypy/rpython/rptr.py ============================================================================== --- pypy/branch/jitffi/pypy/rpython/rptr.py (original) +++ pypy/branch/jitffi/pypy/rpython/rptr.py Thu Sep 23 16:53:32 2010 @@ -35,6 +35,9 @@ id = lltype.cast_ptr_to_int(p) return ll_str.ll_int2hex(r_uint(id), True) + def get_ll_eq_function(self): + return None + def rtype_getattr(self, hop): attr = hop.args_s[1].const if isinstance(hop.s_result, annmodel.SomeLLADTMeth): Modified: pypy/branch/jitffi/pypy/translator/c/funcgen.py ============================================================================== --- pypy/branch/jitffi/pypy/translator/c/funcgen.py (original) +++ pypy/branch/jitffi/pypy/translator/c/funcgen.py Thu Sep 23 16:53:32 2010 @@ -733,6 +733,8 @@ continue elif T == Signed: format.append('%ld') + elif T == Unsigned: + format.append('%lu') elif T == Float: format.append('%f') elif isinstance(T, Ptr) or T == Address: Modified: pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py Thu Sep 23 16:53:32 2010 @@ -856,6 +856,7 @@ visit_and = FunctionGcRootTracker._visit_and visit_xchgl = FunctionGcRootTracker._visit_xchg + visit_xchgq = FunctionGcRootTracker._visit_xchg # used in "xor reg, reg" to create a NULL GC ptr visit_xorl = FunctionGcRootTracker.binary_insn Modified: pypy/branch/jitffi/pypy/translator/c/genc.py ============================================================================== --- pypy/branch/jitffi/pypy/translator/c/genc.py (original) +++ pypy/branch/jitffi/pypy/translator/c/genc.py Thu Sep 23 16:53:32 2010 @@ -592,7 +592,7 @@ if sys.platform == 'win32': python = 
sys.executable.replace('\\', '/') + ' ' else: - python = '' + python = sys.executable + ' ' if self.translator.platform.name == 'msvc': lblofiles = [] Modified: pypy/branch/jitffi/pypy/translator/c/src/mem.h ============================================================================== --- pypy/branch/jitffi/pypy/translator/c/src/mem.h (original) +++ pypy/branch/jitffi/pypy/translator/c/src/mem.h Thu Sep 23 16:53:32 2010 @@ -224,3 +224,13 @@ #define OP_CAST_PTR_TO_WEAKREFPTR(x, r) r = x #define OP_CAST_WEAKREFPTR_TO_PTR(x, r) r = x + +/************************************************************/ +/* dummy version of these operations, e.g. with Boehm */ + +#define OP_GC_GET_RPY_ROOTS(r) r = 0 +#define OP_GC_GET_RPY_REFERENTS(x, r) r = 0 +#define OP_GC_GET_RPY_MEMORY_USAGE(x, r) r = -1 +#define OP_GC_GET_RPY_TYPE_INDEX(x, r) r = -1 +#define OP_GC_IS_RPY_INSTANCE(x, r) r = 0 +#define OP_GC_DUMP_RPY_HEAP(r) r = 0 Modified: pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py ============================================================================== --- pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py (original) +++ pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py Thu Sep 23 16:53:32 2010 @@ -2,7 +2,7 @@ import sys, os, inspect from pypy.objspace.flow.model import summary -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.memory.test import snippet from pypy.rlib import rgc @@ -19,10 +19,11 @@ removetypeptr = False taggedpointers = False GC_CAN_MOVE = False - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False _isolated_func = None + c_allfuncs = None @classmethod def _makefunc_str_int(cls, f): @@ -111,6 +112,7 @@ def teardown_class(cls): if hasattr(cls.c_allfuncs, 'close_isolate'): cls.c_allfuncs.close_isolate() + cls.c_allfuncs = None def run(self, name, *args): if not args: @@ -690,8 +692,8 @@ rgc.collect() if a: assert not rgc.can_move(a) - return 0 - return 1 + return 1 + return 0 except Exception, e: return 2 @@ -699,7 +701,7 @@ def test_malloc_nonmovable(self): res = self.run('malloc_nonmovable') - assert res == self.GC_CANNOT_MALLOC_NONMOVABLE + assert res == self.GC_CAN_MALLOC_NONMOVABLE def define_resizable_buffer(cls): from pypy.rpython.lltypesystem.rstr import STR @@ -891,12 +893,208 @@ def test_arraycopy_writebarrier_ptr(self): self.run("arraycopy_writebarrier_ptr") + def define_get_rpy_roots(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def g(s): + lst = rgc.get_rpy_roots() + found = False + for x in lst: + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s): + found = True + if x == lltype.cast_opaque_ptr(llmemory.GCREF, s.u): + os.write(2, "s.u should not be found!\n") + assert False + return found == 1 + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + found = g(s) + if not found: + os.write(2, "not found!\n") + assert False + s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_roots(self): + self.run("get_rpy_roots") + + def define_get_rpy_referents(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + lst = rgc.get_rpy_referents(gcref1) + assert gcref2 in lst + assert gcref1 not in lst + 
s.u.x = 42 + return 0 + + return fn + + def test_get_rpy_referents(self): + self.run("get_rpy_referents") + + def define_is_rpy_instance(self): + class Foo: + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def check(gcref, expected): + result = rgc._is_rpy_instance(gcref) + assert result == expected + + def fn(): + s = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + check(gcref1, False) + + f = Foo() + gcref3 = rgc.cast_instance_to_gcref(f) + check(gcref3, True) + + return 0 + + return fn + + def test_is_rpy_instance(self): + self.run("is_rpy_instance") + + def define_try_cast_gcref_to_instance(self): + class Foo: + pass + class FooBar(Foo): + pass + class Biz(object): + pass + S = lltype.GcStruct('S', ('x', lltype.Signed)) + + def fn(): + foo = Foo() + gcref1 = rgc.cast_instance_to_gcref(foo) + assert rgc.try_cast_gcref_to_instance(Foo, gcref1) is foo + assert rgc.try_cast_gcref_to_instance(FooBar, gcref1) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref1) is None + + foobar = FooBar() + gcref2 = rgc.cast_instance_to_gcref(foobar) + assert rgc.try_cast_gcref_to_instance(Foo, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(FooBar, gcref2) is foobar + assert rgc.try_cast_gcref_to_instance(Biz, gcref2) is None + + s = lltype.malloc(S) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + assert rgc.try_cast_gcref_to_instance(Foo, gcref3) is None + assert rgc.try_cast_gcref_to_instance(FooBar, gcref3) is None + assert rgc.try_cast_gcref_to_instance(Biz, gcref3) is None + + return 0 + + return fn + + def test_try_cast_gcref_to_instance(self): + self.run("try_cast_gcref_to_instance") + + def define_get_rpy_memory_usage(self): + U = lltype.GcStruct('U', ('x1', lltype.Signed), + ('x2', lltype.Signed), + ('x3', lltype.Signed), + ('x4', lltype.Signed), + ('x5', lltype.Signed), + ('x6', lltype.Signed), + ('x7', lltype.Signed), + ('x8', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_memory_usage(gcref1) + assert 8 <= int1 <= 32 + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_memory_usage(gcref2) + assert 4*9 <= int2 <= 8*12 + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_memory_usage(gcref3) + assert 4*1001 <= int3 <= 8*1010 + return 0 + + return fn + + def test_get_rpy_memory_usage(self): + self.run("get_rpy_memory_usage") + + def define_get_rpy_type_index(self): + U = lltype.GcStruct('U', ('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s) + int1 = rgc.get_rpy_type_index(gcref1) + gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u) + int2 = rgc.get_rpy_type_index(gcref2) + gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a) + int3 = rgc.get_rpy_type_index(gcref3) + gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2) + int4 = rgc.get_rpy_type_index(gcref4) + assert int1 != int2 + assert int1 != int3 + assert int2 != int3 + assert int1 == int4 + return 0 + + return fn + + def test_get_rpy_type_index(self): + self.run("get_rpy_type_index") + + filename_dump = str(udir.join('test_dump_rpy_heap')) + def define_dump_rpy_heap(self): + U = lltype.GcStruct('U', 
('x', lltype.Signed)) + S = lltype.GcStruct('S', ('u', lltype.Ptr(U))) + A = lltype.GcArray(lltype.Ptr(S)) + filename = self.filename_dump + + def fn(): + s = lltype.malloc(S) + s.u = lltype.malloc(U) + a = lltype.malloc(A, 1000) + s2 = lltype.malloc(S) + # + fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666) + rgc.dump_rpy_heap(fd) + os.close(fd) + return 0 + + return fn + + def test_dump_rpy_heap(self): + self.run("dump_rpy_heap") + assert os.path.exists(self.filename_dump) + assert os.path.getsize(self.filename_dump) > 0 # minimal test + class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines): gcpolicy = "semispace" should_be_moving = True GC_CAN_MOVE = True - GC_CANNOT_MALLOC_NONMOVABLE = True + GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_SHRINK_ARRAY = True # for snippets @@ -1055,7 +1253,7 @@ class TestHybridGC(TestGenerationalGC): gcpolicy = "hybrid" should_be_moving = True - GC_CANNOT_MALLOC_NONMOVABLE = False + GC_CAN_MALLOC_NONMOVABLE = True def test_gc_set_max_heap_size(self): py.test.skip("not implemented") @@ -1126,6 +1324,15 @@ res = self.run("adding_a_hash") assert res == 0 +class TestMiniMarkGC(TestSemiSpaceGC): + gcpolicy = "minimark" + should_be_moving = True + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_ARRAY = True + + def test_gc_heap_stats(self): + py.test.skip("not implemented") + # ____________________________________________________________________ class TaggedPointersTest(object): @@ -1180,3 +1387,6 @@ class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC): removetypeptr = True + +class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC): + removetypeptr = True Modified: pypy/branch/jitffi/pypy/translator/exceptiontransform.py ============================================================================== --- pypy/branch/jitffi/pypy/translator/exceptiontransform.py (original) +++ pypy/branch/jitffi/pypy/translator/exceptiontransform.py Thu Sep 23 16:53:32 2010 @@ -277,7 +277,9 @@ block.exits[0].target is graph.returnblock and len(block.operations) and (block.exits[0].args[0].concretetype is lltype.Void or - block.exits[0].args[0] is block.operations[-1].result)): + block.exits[0].args[0] is block.operations[-1].result) and + block.operations[-1].opname not in ('malloc', # special cases + 'malloc_nonmovable')): last_operation -= 1 lastblock = block for i in range(last_operation, -1, -1): @@ -466,6 +468,9 @@ c_flags = spaceop.args[1] c_flags.value = c_flags.value.copy() spaceop.args[1].value['zero'] = True + # NB. 
when inserting more special-cases here, keep in mind that + # you also need to list the opnames in transform_block() + # (see "special cases") if insert_zeroing_op: if normalafterblock is None: From antocuni at codespeak.net Thu Sep 23 16:56:57 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 23 Sep 2010 16:56:57 +0200 (CEST) Subject: [pypy-svn] r77303 - pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt Message-ID: <20100923145657.CB823282C18@codespeak.net> Author: antocuni Date: Thu Sep 23 16:56:56 2010 New Revision: 77303 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Log: adapt to the new resoperation interface Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Thu Sep 23 16:56:56 2010 @@ -20,7 +20,7 @@ return None def optimize_CALL(self, op): - funcbox = op.args[0] + funcbox = op.getarg(0) oopspec = self.get_oopspec(funcbox) if oopspec == 'prepare_call': self.do_prepare_call(op) @@ -33,26 +33,26 @@ self.emit_operation(op) def do_prepare_call(self, op): - funcbox = op.args[1] + funcbox = op.getarg(1) assert funcbox not in self.func_args self.func_args[funcbox] = [] def do_push_arg(self, op): - funcbox = op.args[1] + funcbox = op.getarg(1) self.func_args[funcbox].append(op) def do_call(self, op): - funcbox = op.args[1] - funcsymbox = op.args[2] + funcbox = op.getarg(1) + funcsymbox = op.getarg(2) arglist = [funcsymbox] for push_op in self.func_args[funcbox]: - arglist.append(push_op.args[2]) + arglist.append(push_op.getarg(2)) newop = ResOperation(rop.CALL_C, arglist, op.result, None) del self.func_args[funcbox] return newop def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) From antocuni at codespeak.net Thu Sep 23 17:09:08 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 23 Sep 2010 17:09:08 +0200 (CEST) Subject: [pypy-svn] r77304 - pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt Message-ID: <20100923150908.7DEC0282C18@codespeak.net> Author: antocuni Date: Thu Sep 23 17:09:06 2010 New Revision: 77304 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Log: use getvalue() everywhere, instead of accessing directly to the boxes Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Thu Sep 23 17:09:06 2010 @@ -7,10 +7,10 @@ def __init__(self): self.func_args = {} - def get_oopspec(self, funcbox): + def get_oopspec(self, funcval): # XXX: not RPython at all, just a hack while waiting to have an # "official" way to know if and which oopspec we are calling - funcname = str(funcbox) + funcname = str(funcval.box) if '_libffi_prepare_call' in funcname: return 'prepare_call' elif '_libffi_push_arg' in funcname: @@ -20,8 +20,8 @@ return None def optimize_CALL(self, op): - funcbox = op.getarg(0) - oopspec = self.get_oopspec(funcbox) + funcval = self.getvalue(op.getarg(0)) + oopspec = self.get_oopspec(funcval) if oopspec == 'prepare_call': self.do_prepare_call(op) return @@ -33,22 +33,25 @@ self.emit_operation(op) def do_prepare_call(self, op): - 
funcbox = op.getarg(1) - assert funcbox not in self.func_args - self.func_args[funcbox] = [] + funcval = self.getvalue(op.getarg(1)) + assert funcval not in self.func_args + self.func_args[funcval] = [] def do_push_arg(self, op): - funcbox = op.getarg(1) - self.func_args[funcbox].append(op) + # we store the op in func_args because we might want to emit it later, + # in case we give up with the optimization + funcval = self.getvalue(op.getarg(1)) + self.func_args[funcval].append(op) def do_call(self, op): - funcbox = op.getarg(1) - funcsymbox = op.getarg(2) - arglist = [funcsymbox] - for push_op in self.func_args[funcbox]: - arglist.append(push_op.getarg(2)) + funcval = self.getvalue(op.getarg(1)) + funcsymval = self.getvalue(op.getarg(2)) + arglist = [funcsymval.force_box()] + for push_op in self.func_args[funcval]: + argval = self.getvalue(push_op.getarg(2)) + arglist.append(argval.force_box()) newop = ResOperation(rop.CALL_C, arglist, op.result, None) - del self.func_args[funcbox] + del self.func_args[funcval] return newop def propagate_forward(self, op): From arigo at codespeak.net Thu Sep 23 17:20:14 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 17:20:14 +0200 (CEST) Subject: [pypy-svn] r77305 - in pypy/branch/jit-str/pypy/jit/backend: test x86 Message-ID: <20100923152014.F1F3D282C18@codespeak.net> Author: arigo Date: Thu Sep 23 17:20:13 2010 New Revision: 77305 Modified: pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py pypy/branch/jit-str/pypy/jit/backend/x86/assembler.py pypy/branch/jit-str/pypy/jit/backend/x86/codebuf.py pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py Log: Test and code for COPYSTRCONTENT in the x86 backend. Modified: pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py Thu Sep 23 17:20:13 2010 @@ -814,6 +814,17 @@ r = self.execute_operation(rop.STRGETITEM, [s_box, BoxInt(4)], 'int') assert r.value == 153 + def test_copystrcontent(self): + s_box = self.alloc_string("abcdef") + for srcstart_box in [BoxInt(2), ConstInt(2)]: + for dststart_box in [BoxInt(3), ConstInt(3)]: + for length_box in [BoxInt(4), ConstInt(4)]: + r_box = self.alloc_string("!???????!") + self.execute_operation(rop.COPYSTRCONTENT, + [s_box, r_box, srcstart_box, + dststart_box, length_box], 'void') + assert self.look_string(r_box) == "!??cdef?!" 
+ def test_do_unicode_basic(self): u = self.cpu.bh_newunicode(5) self.cpu.bh_unicodesetitem(u, 4, 123) @@ -1197,6 +1208,10 @@ s_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, s)) return s_box + def look_string(self, string_box): + s = string_box.getref(lltype.Ptr(rstr.STR)) + return ''.join(s.chars) + def alloc_unicode(self, unicode): u = rstr.mallocunicode(len(unicode)) for i in range(len(unicode)): Modified: pypy/branch/jit-str/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/x86/assembler.py Thu Sep 23 17:20:13 2010 @@ -181,6 +181,7 @@ self.malloc_fixedsize_slowpath1 = 0 self.malloc_fixedsize_slowpath2 = 0 self.pending_guard_tokens = None + self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') @@ -212,6 +213,7 @@ ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, ll_new_unicode) + self.memcpy_addr = self.cpu.cast_ptr_to_int(codebuf.memcpy_fn) self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -712,8 +714,8 @@ self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, resloc, current_depths) - def load_effective_addr(self, sizereg, baseofs, scale, result): - self.mc.LEA(result, addr_add(imm(0), sizereg, baseofs, scale)) + def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm(0)): + self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) def _unaryop(asmop): def genop_unary(self, op, arglocs, resloc): Modified: pypy/branch/jit-str/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/x86/codebuf.py Thu Sep 23 17:20:13 2010 @@ -1,6 +1,6 @@ import os, sys -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder from pypy.jit.backend.x86.regloc import LocationCodeBuilder @@ -158,6 +158,12 @@ # ____________________________________________________________ +memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True, _nowrapper=True) + +# ____________________________________________________________ + if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: Modified: pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py Thu Sep 23 17:20:13 2010 @@ -936,6 +936,39 @@ consider_unicodegetitem = consider_strgetitem + def consider_copystrcontent(self, op): + # compute the source address + base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args) + self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.args[2]) + srcaddr_box = TempBox() + srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, 
srcaddr_loc) + # compute the destination address + base_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.args[3], op.args) + self.rm.possibly_free_var(op.args[1]) + self.rm.possibly_free_var(op.args[3]) + dstaddr_box = TempBox() + dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) + # call memcpy() + length_loc = self.loc(op.args[4]) + self.assembler._emit_call(imm(self.assembler.memcpy_addr), + [dstaddr_loc, srcaddr_loc, length_loc]) + self.rm.possibly_free_var(op.args[4]) + self.rm.possibly_free_var(dstaddr_box) + self.rm.possibly_free_var(srcaddr_box) + + def _gen_address_inside_string(self, baseloc, ofsloc, resloc): + cpu = self.assembler.cpu + ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR, + self.translate_support_code) + assert itemsize == 1 + self.assembler.load_effective_addr(ofsloc, ofs_items, 0, + resloc, baseloc) + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None From arigo at codespeak.net Thu Sep 23 17:39:59 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 23 Sep 2010 17:39:59 +0200 (CEST) Subject: [pypy-svn] r77306 - in pypy/branch/jit-str/pypy/jit/backend: test x86 x86/test Message-ID: <20100923153959.2CF71282C1A@codespeak.net> Author: arigo Date: Thu Sep 23 17:39:57 2010 New Revision: 77306 Added: pypy/branch/jit-str/pypy/jit/backend/x86/test/test_string.py (contents, props changed) Modified: pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py pypy/branch/jit-str/pypy/jit/backend/x86/rx86.py Log: Add the tests from the front-end here too, and fix the failures in register allocation caught by them. Modified: pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/test/runner_test.py Thu Sep 23 17:39:57 2010 @@ -816,14 +816,20 @@ def test_copystrcontent(self): s_box = self.alloc_string("abcdef") - for srcstart_box in [BoxInt(2), ConstInt(2)]: - for dststart_box in [BoxInt(3), ConstInt(3)]: - for length_box in [BoxInt(4), ConstInt(4)]: - r_box = self.alloc_string("!???????!") - self.execute_operation(rop.COPYSTRCONTENT, - [s_box, r_box, srcstart_box, - dststart_box, length_box], 'void') - assert self.look_string(r_box) == "!??cdef?!" + for s_box in [s_box, s_box.constbox()]: + for srcstart_box in [BoxInt(2), ConstInt(2)]: + for dststart_box in [BoxInt(3), ConstInt(3)]: + for length_box in [BoxInt(4), ConstInt(4)]: + for r_box_is_const in [False, True]: + r_box = self.alloc_string("!???????!") + if r_box_is_const: + r_box = r_box.constbox() + self.execute_operation(rop.COPYSTRCONTENT, + [s_box, r_box, + srcstart_box, + dststart_box, + length_box], 'void') + assert self.look_string(r_box) == "!??cdef?!" 
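# Aside, not part of the patch: _gen_address_inside_string() above reduces the
# address computation to a single LEA.  Assuming the chars of an rstr.STR start
# at some byte offset ofs_items from the string pointer (the value returned by
# symbolic.get_array_token()), the address of s.chars[index] is simply
# base + ofs_items + index, because itemsize == 1 and no scaling is needed:
def char_address(base, ofs_items, index):
    # what load_effective_addr(ofsloc, ofs_items, 0, resloc, baseloc) encodes
    return base + ofs_items + index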
def test_do_unicode_basic(self): u = self.cpu.bh_newunicode(5) Modified: pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/x86/regalloc.py Thu Sep 23 17:39:57 2010 @@ -955,6 +955,8 @@ self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) # call memcpy() length_loc = self.loc(op.args[4]) + self.rm.before_call() + self.xrm.before_call() self.assembler._emit_call(imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(op.args[4]) Modified: pypy/branch/jit-str/pypy/jit/backend/x86/rx86.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/x86/rx86.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/x86/rx86.py Thu Sep 23 17:39:57 2010 @@ -506,6 +506,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) Added: pypy/branch/jit-str/pypy/jit/backend/x86/test/test_string.py ============================================================================== --- (empty file) +++ pypy/branch/jit-str/pypy/jit/backend/x86/test/test_string.py Thu Sep 23 17:39:57 2010 @@ -0,0 +1,9 @@ +import py +from pypy.jit.metainterp.test import test_string +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestString(Jit386Mixin, test_string.StringTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_string.py + CALL = 'call' + CALL_PURE = 'call_pure' From fijal at codespeak.net Thu Sep 23 22:26:06 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 23 Sep 2010 22:26:06 +0200 (CEST) Subject: [pypy-svn] r77318 - in pypy/trunk/pypy: interpreter module/pypyjit/test objspace/std Message-ID: <20100923202606.9532B282C09@codespeak.net> Author: fijal Date: Thu Sep 23 22:26:03 2010 New Revision: 77318 Modified: pypy/trunk/pypy/interpreter/baseobjspace.py pypy/trunk/pypy/interpreter/pyopcode.py pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py pypy/trunk/pypy/objspace/std/objspace.py Log: A slight simplification of unpackiterable handling. also provide a second version of unpackiterable that provides possibility to unroll (called from unpack_sequence opcode). Corresponding pypy_c_jit test Modified: pypy/trunk/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/trunk/pypy/interpreter/baseobjspace.py (original) +++ pypy/trunk/pypy/interpreter/baseobjspace.py Thu Sep 23 22:26:03 2010 @@ -12,6 +12,7 @@ from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint from pypy.rlib import jit +from pypy.tool.sourcetools import func_with_new_name import os, sys, py __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root'] @@ -749,12 +750,17 @@ (i, plural))) return items + unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, + 'unpackiterable_unroll')) + def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. 
Don't modify the result """ return make_sure_not_resized(self.unpackiterable(w_iterable, expected_length)[:]) + fixedview_unroll = fixedview + def listview(self, w_iterable, expected_length=-1): """ A non-fixed view of w_iterable. Don't modify the result """ Modified: pypy/trunk/pypy/interpreter/pyopcode.py ============================================================================== --- pypy/trunk/pypy/interpreter/pyopcode.py (original) +++ pypy/trunk/pypy/interpreter/pyopcode.py Thu Sep 23 22:26:03 2010 @@ -637,7 +637,7 @@ def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() - items = self.space.fixedview(w_iterable, itemcount) + items = self.space.fixedview_unroll(w_iterable, itemcount) self.pushrevvalues(itemcount, items) def STORE_ATTR(self, nameindex, next_instr): Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 23 22:26:03 2010 @@ -869,6 +869,24 @@ return intimg[i - 1] ''', maxops, ([tc], res)) + def test_unpackiterable(self): + self.run_source(''' + from array import array + + def main(): + i = 0 + t = array('l', (1, 2)) + while i < 2000: + a, b = t + i += 1 + return 3 + + ''', 100, ([], 3)) + bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") + # we allocate virtual ref and frame, we don't want block + assert len(bytecode.get_opnames('call_may_force')) == 0 + + def test_intbound_simple(self): ops = ('<', '>', '<=', '>=', '==', '!=') nbr = (3, 7) Modified: pypy/trunk/pypy/objspace/std/objspace.py ============================================================================== --- pypy/trunk/pypy/objspace/std/objspace.py (original) +++ pypy/trunk/pypy/objspace/std/objspace.py Thu Sep 23 22:26:03 2010 @@ -7,7 +7,7 @@ from pypy.objspace.std import (builtinshortcut, stdtypedef, frame, model, transparent, callmethod, proxyobject) from pypy.objspace.descroperation import DescrOperation, raiseattrerror -from pypy.rlib.objectmodel import instantiate, r_dict +from pypy.rlib.objectmodel import instantiate, r_dict, specialize from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.rarithmetic import base_int from pypy.rlib.objectmodel import we_are_translated @@ -350,7 +350,8 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def fixedview(self, w_obj, expected_length=-1): + @specialize.arg(3) + def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ if isinstance(w_obj, W_TupleObject): @@ -358,18 +359,26 @@ elif isinstance(w_obj, W_ListObject): t = w_obj.wrappeditems[:] else: - return ObjSpace.fixedview(self, w_obj, expected_length) + if unroll: + return make_sure_not_resized(ObjSpace.unpackiterable_unroll( + self, w_obj, expected_length)[:]) + else: + return make_sure_not_resized(ObjSpace.unpackiterable( + self, w_obj, expected_length)[:]) if expected_length != -1 and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) return t + def fixedview_unroll(self, w_obj, expected_length=-1): + return self.fixedview(w_obj, expected_length, unroll=True) + def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] else: - return ObjSpace.listview(self, w_obj, expected_length) + return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 
and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) return t From afa at codespeak.net Thu Sep 23 23:02:04 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 23 Sep 2010 23:02:04 +0200 (CEST) Subject: [pypy-svn] r77319 - pypy/branch/fast-forward/pypy/rlib/test Message-ID: <20100923210204.4F844282C09@codespeak.net> Author: afa Date: Thu Sep 23 23:02:02 2010 New Revision: 77319 Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Log: A failing test that shows why pypy-c crashes the second time. Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Thu Sep 23 23:02:02 2010 @@ -1,7 +1,9 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rlib.rstruct.runpack import runpack +from pypy.rlib.rstruct import ieee from pypy.rlib.rarithmetic import LONG_BIT +from pypy.translator.c.test.test_genc import compile import struct class BaseTestRStruct(BaseRtypingTest): @@ -34,3 +36,18 @@ class TestOOType(BaseTestRStruct, OORtypeMixin): pass + +class TestCompiled: + def test_pack_float(self): + def pack(x): + result = [] + ieee.pack_float(result, x, 8, False) + return ''.join(result) + c_pack = compile(pack, [float]) + def unpack(s): + return ieee.unpack_float(s, False) + c_unpack = compile(unpack, [str]) + + s = c_pack(123.456) + assert s == pack(123.456) + assert c_unpack(s) == 123.456 From afa at codespeak.net Fri Sep 24 00:05:14 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 00:05:14 +0200 (CEST) Subject: [pypy-svn] r77320 - pypy/branch/fast-forward/pypy/rlib/rstruct Message-ID: <20100923220514.AB6F2282C0E@codespeak.net> Author: afa Date: Fri Sep 24 00:05:13 2010 New Revision: 77320 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Log: Skip the check, this fixes the test. Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Fri Sep 24 00:05:13 2010 @@ -43,8 +43,12 @@ else: raise ValueError("invalid size value") - if Q >> BITS: - raise ValueError("input out of range") + if not objectmodel.we_are_translated(): + # This tests generates wrong code when translated: + # with gcc, shifting a 64bit int by 64 bits does + # not change the value. 
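# Aside, not part of the patch: the reason the check must be skipped once
# translated is that the expression becomes a C shift, and in C a shift count
# equal to the operand width is undefined behaviour; on x86 the count is
# typically taken modulo 64, so Q >> 64 tends to give back Q instead of 0.
# Interpreted, the same expression is perfectly well defined:
q = (1 << 63) | 1       # any value with the high bit set
assert q >> 64 == 0     # true in plain Python, not guaranteed after translation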
+ if Q >> BITS: + raise ValueError("input out of range") # extract pieces one = r_ulonglong(1) From afa at codespeak.net Fri Sep 24 00:41:41 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 00:41:41 +0200 (CEST) Subject: [pypy-svn] r77321 - pypy/branch/fast-forward/lib_pypy/_ctypes Message-ID: <20100923224141.1D604282C09@codespeak.net> Author: afa Date: Fri Sep 24 00:41:39 2010 New Revision: 77321 Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py Log: Fix last issues when importing ctypes Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/__init__.py Fri Sep 24 00:41:39 2010 @@ -5,11 +5,12 @@ from _ctypes.pointer import _Pointer, _cast_addr from _ctypes.pointer import POINTER, pointer, _pointer_type_cache from _ctypes.function import CFuncPtr -from _ctypes.dll import dlopen as LoadLibrary +from _ctypes.dll import dlopen from _ctypes.structure import Structure from _ctypes.array import Array -from _ctypes.builtin import _memmove_addr, _string_at, _memset_addr,\ - set_conversion_mode, _wstring_at +from _ctypes.builtin import ( + _memmove_addr, _memset_addr, + _string_at_addr, _wstring_at_addr, set_conversion_mode) from _ctypes.union import Union import os as _os @@ -18,6 +19,7 @@ from _rawffi import FormatError from _rawffi import check_HRESULT as _check_HRESULT CopyComPointer = None # XXX + LoadLibrary = dlopen from _rawffi import FUNCFLAG_STDCALL, FUNCFLAG_CDECL, FUNCFLAG_PYTHONAPI from _rawffi import FUNCFLAG_USE_ERRNO, FUNCFLAG_USE_LASTERROR Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/builtin.py Fri Sep 24 00:41:39 2010 @@ -8,7 +8,7 @@ _memmove_addr = _rawffi.get_libc().getaddressindll('memmove') _memset_addr = _rawffi.get_libc().getaddressindll('memset') -def _string_at(addr, lgt): +def _string_at_addr(addr, lgt): # address here can be almost anything import ctypes arg = ctypes.c_void_p._CData_value(addr) @@ -20,7 +20,7 @@ ConvMode.encoding = encoding return old_cm -def _wstring_at(addr, lgt): +def _wstring_at_addr(addr, lgt): import ctypes arg = ctypes.c_void_p._CData_value(addr) # XXX purely applevel Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py (original) +++ pypy/branch/fast-forward/lib_pypy/_ctypes/primitive.py Fri Sep 24 00:41:39 2010 @@ -142,13 +142,13 @@ result.value = property(_getvalue, _setvalue) elif tp == 'Z': # c_wchar_p - from _ctypes import _wstring_at + from _ctypes import _wstring_at_addr def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: - return _wstring_at(addr, -1) + return _wstring_at_addr(addr, -1) def _setvalue(self, value): if isinstance(value, basestring): @@ -217,14 +217,14 @@ SysAllocStringLen = windll.oleaut32.SysAllocStringLen SysStringLen = windll.oleaut32.SysStringLen SysFreeString = windll.oleaut32.SysFreeString - from _ctypes import _wstring_at + from _ctypes import _wstring_at_addr def _getvalue(self): addr = 
self._buffer[0] if addr == 0: return None else: size = SysStringLen(addr) - return _wstring_at(addr, size) + return _wstring_at_addr(addr, size) def _setvalue(self, value): if isinstance(value, basestring): From afa at codespeak.net Fri Sep 24 08:32:58 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 08:32:58 +0200 (CEST) Subject: [pypy-svn] r77322 - pypy/branch/fast-forward/pypy/rlib/rstruct Message-ID: <20100924063258.33B1A282B90@codespeak.net> Author: afa Date: Fri Sep 24 08:32:55 2010 New Revision: 77322 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Log: Fix same previous test on 32bit platform. Should fix pypy-c crashes Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Fri Sep 24 08:32:55 2010 @@ -54,7 +54,7 @@ one = r_ulonglong(1) sign = rarithmetic.intmask(Q >> BITS - 1) exp = rarithmetic.intmask((Q & ((one << BITS - 1) - (one << MANT_DIG - 1))) >> MANT_DIG - 1) - mant = Q & ((1 << MANT_DIG - 1) - 1) + mant = Q & ((one << MANT_DIG - 1) - 1) if exp == MAX_EXP - MIN_EXP + 2: # nan or infinity @@ -64,7 +64,7 @@ result = math.ldexp(mant, MIN_EXP - MANT_DIG) else: # normal - mant += 1 << MANT_DIG - 1 + mant += one << MANT_DIG - 1 result = math.ldexp(mant, exp + MIN_EXP - MANT_DIG - 1) return -result if sign else result @@ -128,6 +128,8 @@ assert 0 <= mant < 1 << MANT_DIG - 1 assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 assert 0 <= sign <= 1 + + exp = r_ulonglong(exp) return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant From afa at codespeak.net Fri Sep 24 08:38:07 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 08:38:07 +0200 (CEST) Subject: [pypy-svn] r77323 - in pypy/branch/fast-forward/pypy/rlib: rstruct test Message-ID: <20100924063807.25AD4282B90@codespeak.net> Author: afa Date: Fri Sep 24 08:38:05 2010 New Revision: 77323 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Log: Fix another 32bit issue (a float needs a 64bit number...) 
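(Sketch of the underlying problem, not the actual rlib code: on a 32-bit build,
sign and exp arrive as plain machine-sized integers, so the shifts used to
assemble the 64-bit pattern overflow unless both operands are widened first,
which is what the change below does.)

    from pypy.rlib.rarithmetic import r_ulonglong

    def pack_double_bits(sign, exp, mant):
        # for a double, BITS - 1 == 63 and MANT_DIG - 1 == 52
        sign = r_ulonglong(sign)   # without these two conversions the shifts
        exp = r_ulonglong(exp)     # would be performed on 32-bit integers
        return (sign << 63) | (exp << 52) | mant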
Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Fri Sep 24 08:38:05 2010 @@ -130,6 +130,7 @@ assert 0 <= sign <= 1 exp = r_ulonglong(exp) + sign = r_ulonglong(sign) return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Fri Sep 24 08:38:05 2010 @@ -51,3 +51,7 @@ s = c_pack(123.456) assert s == pack(123.456) assert c_unpack(s) == 123.456 + + s = c_pack(-123.456) + assert s == pack(-123.456) + assert c_unpack(s) == -123.456 From arigo at codespeak.net Fri Sep 24 09:54:03 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 09:54:03 +0200 (CEST) Subject: [pypy-svn] r77324 - in pypy/branch/jit-str/pypy: jit/codewriter jit/codewriter/test rpython/lltypesystem Message-ID: <20100924075403.92A6D282B90@codespeak.net> Author: arigo Date: Fri Sep 24 09:54:01 2010 New Revision: 77324 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Log: Simplify the OS_STR_SLICE_xyz specs; we can keep only one, the most general version. Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Fri Sep 24 09:54:01 2010 @@ -19,13 +19,9 @@ OS_NONE = 0 # normal case, no oopspec OS_ARRAYCOPY = 1 # "list.ll_arraycopy" OS_STR_CONCAT = 2 # "stroruni.concat" - OS_STR_SLICE_STARTONLY = 3 # "stroruni.slice_startonly" - OS_STR_SLICE_STARTSTOP = 4 # "stroruni.slice_startstop" - OS_STR_SLICE_MINUSONE = 5 # "stroruni.slice_minusone" - OS_UNI_CONCAT = 82 # "stroruni.concat" (+80) - OS_UNI_SLICE_STARTONLY = 83 # "stroruni.slice_startonly" (+80) - OS_UNI_SLICE_STARTSTOP = 84 # "stroruni.slice_startstop" (+80) - OS_UNI_SLICE_MINUSONE = 85 # "stroruni.slice_minusone" (+80) + OS_STR_SLICE = 3 # "stroruni.slice" + OS_UNI_CONCAT = 4 # "stroruni.concat" + OS_UNI_SLICE = 5 # "stroruni.slice" def __new__(cls, readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Fri Sep 24 09:54:01 2010 @@ -1043,18 +1043,15 @@ return op1 def _handle_stroruni_call(self, op, oopspec_name, args): - dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, - "stroruni.slice_startonly": EffectInfo.OS_STR_SLICE_STARTONLY, - "stroruni.slice_startstop": EffectInfo.OS_STR_SLICE_STARTSTOP, - "stroruni.slice_minusone": EffectInfo.OS_STR_SLICE_MINUSONE} - base = dict[oopspec_name] if args[0].concretetype.TO == rstr.STR: - offset = 0 + dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, + "stroruni.slice": EffectInfo.OS_STR_SLICE} elif args[0].concretetype.TO == rstr.UNICODE: - offset = 80 + dict 
= {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, + "stroruni.slice": EffectInfo.OS_UNI_SLICE} else: assert 0, "args[0].concretetype must be STR or UNICODE" - return self._handle_oopspec_call(op, args, base + offset) + return self._handle_oopspec_call(op, args, dict[oopspec_name]) # ---------- # VirtualRefs. Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_jtransform.py Fri Sep 24 09:54:01 2010 @@ -724,67 +724,48 @@ assert got[0] == op1.args[1] # the calldescr assert heaptracker.int2adr(got[1]) == llmemory.cast_ptr_to_adr(func) -def test_str_stringslice_startonly(): +def test_str_slice(): # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.STR) INT = lltype.Signed - FUNC = lltype.FuncType([PSTR, INT], PSTR) - func = lltype.functionptr(FUNC, 'll_stringslice_startonly', - _callable=rstr.LLHelpers.ll_stringslice_startonly) + FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) v1 = varoftype(PSTR) v2 = varoftype(INT) - v3 = varoftype(PSTR) - op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + v3 = varoftype(INT) + v4 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_ir_r' assert op1.args[0].value == func - assert op1.args[1] == 'calldescr-%d' % ( - effectinfo.EffectInfo.OS_STR_SLICE_STARTONLY) - assert op1.args[2] == ListOfKind('int', [v2]) + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_SLICE + assert op1.args[2] == ListOfKind('int', [v2, v3]) assert op1.args[3] == ListOfKind('ref', [v1]) - assert op1.result == v3 + assert op1.result == v4 -def test_str_stringslice_startstop(): +def test_unicode_slice(): # test that the oopspec is present and correctly transformed - PSTR = lltype.Ptr(rstr.STR) + PUNICODE = lltype.Ptr(rstr.UNICODE) INT = lltype.Signed - FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) - func = lltype.functionptr(FUNC, '_ll_stringslice_startstop', - _callable=rstr.LLHelpers._ll_stringslice_startstop) - v1 = varoftype(PSTR) + FUNC = lltype.FuncType([PUNICODE, INT, INT], PUNICODE) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) + v1 = varoftype(PUNICODE) v2 = varoftype(INT) v3 = varoftype(INT) - v4 = varoftype(PSTR) + v4 = varoftype(PUNICODE) op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_ir_r' assert op1.args[0].value == func - assert op1.args[1] == 'calldescr-%d' % ( - effectinfo.EffectInfo.OS_STR_SLICE_STARTSTOP) + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_SLICE assert op1.args[2] == ListOfKind('int', [v2, v3]) assert op1.args[3] == ListOfKind('ref', [v1]) assert op1.result == v4 -def test_str_stringslice_minusone(): - # test that the oopspec is present and correctly transformed - PSTR = lltype.Ptr(rstr.STR) - FUNC = lltype.FuncType([PSTR], PSTR) - func = lltype.functionptr(FUNC, 'll_stringslice_minusone', - _callable=rstr.LLHelpers.ll_stringslice_minusone) - v1 = varoftype(PSTR) - v2 = varoftype(PSTR) - op = 
SpaceOperation('direct_call', [const(func), v1], v2) - tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) - op1 = tr.rewrite_operation(op) - assert op1.opname == 'residual_call_r_r' - assert op1.args[0].value == func - assert op1.args[1] == 'calldescr-%d' % ( - effectinfo.EffectInfo.OS_STR_SLICE_MINUSONE) - assert op1.args[2] == ListOfKind('ref', [v1]) - assert op1.result == v2 - def test_list_ll_arraycopy(): from pypy.rlib.rgc import ll_arraycopy LIST = lltype.GcArray(lltype.Signed) Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Fri Sep 24 09:54:01 2010 @@ -693,25 +693,25 @@ i += 1 return result + def _ll_stringslice(s1, start, stop): + newstr = s1.malloc(stop - start) + assert start >= 0 + lgt = stop - start + assert lgt >= 0 + s1.copy_contents(s1, newstr, start, 0, lgt) + return newstr + _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' + def ll_stringslice_startonly(s1, start): len1 = len(s1.chars) + if we_are_jitted(): + return LLHelpers._ll_stringslice(s1, start, len1) newstr = s1.malloc(len1 - start) lgt = len1 - start assert lgt >= 0 assert start >= 0 s1.copy_contents(s1, newstr, start, 0, lgt) return newstr - ll_stringslice_startonly.oopspec = 'stroruni.slice_startonly(s1, start)' - - def _ll_stringslice_startstop(s1, start, stop): - newstr = s1.malloc(stop - start) - assert start >= 0 - lgt = stop - start - assert lgt >= 0 - s1.copy_contents(s1, newstr, start, 0, lgt) - return newstr - _ll_stringslice_startstop.oopspec = ('stroruni.slice_startstop(s1, ' - 'start, stop)') def ll_stringslice_startstop(s1, start, stop): if we_are_jitted(): @@ -722,15 +722,16 @@ if start == 0: return s1 stop = len(s1.chars) - return LLHelpers._ll_stringslice_startstop(s1, start, stop) + return LLHelpers._ll_stringslice(s1, start, stop) def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 + if we_are_jitted(): + return LLHelpers._ll_stringslice(s1, 0, newlen) newstr = s1.malloc(newlen) assert newlen >= 0 s1.copy_contents(s1, newstr, 0, 0, newlen) return newstr - ll_stringslice_minusone.oopspec = 'stroruni.slice_minusone(s1)' def ll_split_chr(LIST, s, c): chars = s.chars From arigo at codespeak.net Fri Sep 24 09:54:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 09:54:53 +0200 (CEST) Subject: [pypy-svn] r77325 - in pypy/branch/jit-str/pypy/jit/metainterp: optimizeopt test Message-ID: <20100924075453.0A446282B90@codespeak.net> Author: arigo Date: Fri Sep 24 09:54:51 2010 New Revision: 77325 Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Log: Start to implement virtual slices. 
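(The idea, in plain Python rather than the optimizer classes touched below: a
virtual slice remembers the source string and the bounds instead of copying;
the characters are only copied once the value actually escapes.)

    class VirtualSlice(object):            # illustrative name only
        def __init__(self, source, start, length):
            self.source = source           # the source string, which may itself be virtual
            self.start = start
            self.length = length

        def force(self):
            # materialize the substring only when it escapes the traced loop
            return self.source[self.start:self.start + self.length]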
Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 09:54:51 2010 @@ -209,10 +209,10 @@ class VStringPlainValue(VAbstractStringValue): """A string built with newstr(const).""" + _lengthbox = None # cache only def setup(self, size): self._chars = [CVAL_ZERO] * size - self._lengthbox = None # cache only def getstrlen(self, _): if self._lengthbox is None: @@ -278,6 +278,37 @@ return modifier.make_vstrconcat() +class VStringSliceValue(VAbstractStringValue): + """A slice.""" + + def setup(self, vstr, vstart, vlength): + self.vstr = vstr + self.vstart = vstart + self.vlength = vlength + + def getstrlen(self, newoperations): + return self.vlength.force_box() + + def string_copy_parts(self, newoperations, targetbox, offsetbox): + lengthbox = self.getstrlen(newoperations) + return copy_str_content(newoperations, + self.vstr.force_box(), targetbox, + self.vstart.force_box(), offsetbox, + lengthbox) + + def get_args_for_fail(self, modifier): + xxx + if self.box is None and not modifier.already_seen_virtual(self.keybox): + charboxes = [value.get_key_box() for value in self._chars] + modifier.register_virtual_fields(self.keybox, charboxes) + for value in self._chars: + value.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + xxx + return modifier.make_vstrplain() + + def default_string_copy_parts(srcvalue, newoperations, targetbox, offsetbox): # Copies the pointer-to-string 'srcvalue' into the target string # given by 'targetbox', at the specified offset. Returns the offset @@ -285,9 +316,15 @@ srcbox = srcvalue.force_box() lengthbox = BoxInt() newoperations.append(ResOperation(rop.STRLEN, [srcbox], lengthbox)) + return copy_str_content(newoperations, srcbox, targetbox, + CONST_0, offsetbox, lengthbox) + +def copy_str_content(newoperations, srcbox, targetbox, + srcoffsetbox, offsetbox, lengthbox): nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) newoperations.append(ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, - CONST_0, offsetbox, + srcoffsetbox, + offsetbox, lengthbox], None)) return nextoffsetbox @@ -303,6 +340,16 @@ newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) return resbox +def _int_sub(newoperations, box1, box2): + if isinstance(box2, ConstInt): + if box2.value == 0: + return box1 + if isinstance(box1, ConstInt): + return ConstInt(box1.value - box2.value) + resbox = BoxInt() + newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + return resbox + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): @@ -402,6 +449,11 @@ self.make_equal_to(box, vvalue) return vvalue + def make_vstring_slice(self, box, source_op=None): + vvalue = VStringSliceValue(self.optimizer, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] @@ -623,9 +675,28 @@ len2box = vright.getstrlen(newoperations) lengthbox = _int_add(newoperations, len1box, len2box) value = self.make_vstring_concat(op.result, op) - value.setup(left = self.getvalue(op.args[1]), - right = self.getvalue(op.args[2]), - lengthbox = lengthbox) + value.setup(vleft, vright, lengthbox) + return True + + def opt_call_oopspec_STR_SLICE(self, op): + newoperations = 
self.optimizer.newoperations + vstr = self.getvalue(op.args[1]) + vstart = self.getvalue(op.args[2]) + vstop = self.getvalue(op.args[3]) + lengthbox = _int_sub(newoperations, vstop.force_box(), + vstart.force_box()) + value = self.make_vstring_slice(op.result, op) + # + if isinstance(vstr, VStringSliceValue): + # double slicing s[i:j][k:l] + vintermediate = vstr + vstr = vintermediate.vstr + startbox = _int_add(newoperations, + vintermediate.vstart.force_box(), + vstart.force_box()) + vstart = self.getvalue(startbox) + # + value.setup(vstr, vstart, self.getvalue(lengthbox)) return True def propagate_forward(self, op): Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Fri Sep 24 09:54:51 2010 @@ -119,6 +119,9 @@ EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY)) strconcatdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT)) + slicedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE)) + class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 24 09:54:51 2010 @@ -3921,7 +3921,7 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - def test_concat_1(self): + def test_str_concat_1(self): ops = """ [p1, p2] p3 = call(0, p1, p2, descr=strconcatdescr) @@ -3942,7 +3942,7 @@ """ self.optimize_loop(ops, 'Not, Not', expected) - def test_concat_vstr2_str(self): + def test_str_concat_vstr2_str(self): ops = """ [i0, i1, p2] p1 = newstr(2) @@ -3965,7 +3965,7 @@ """ self.optimize_loop(ops, 'Not, Not, Not', expected) - def test_concat_str_vstr2(self): + def test_str_concat_str_vstr2(self): ops = """ [i0, i1, p2] p1 = newstr(2) @@ -3989,7 +3989,7 @@ """ self.optimize_loop(ops, 'Not, Not, Not', expected) - def test_concat_str_str_str(self): + def test_str_concat_str_str_str(self): ops = """ [p1, p2, p3] p4 = call(0, p1, p2, descr=strconcatdescr) @@ -4016,6 +4016,53 @@ """ self.optimize_loop(ops, 'Not, Not, Not', expected) + def test_str_slice_1(self): + ops = """ + [p1, i1, i2] + p2 = call(0, p1, i1, i2, descr=slicedescr) + jump(p2, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + p2 = newstr(i3) + copystrcontent(p1, p2, i1, 0, i3) + jump(p2, i1, i2) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_slice_2(self): + ops = """ + [p1, i2] + p2 = call(0, p1, 0, i2, descr=slicedescr) + jump(p2, i2) + """ + expected = """ + [p1, i2] + p2 = newstr(i2) + copystrcontent(p1, p2, 0, 0, i2) + jump(p2, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_slice_3(self): + ops = """ + [p1, i1, i2, i3, i4] + p2 = call(0, p1, i1, i2, descr=slicedescr) + p3 = call(0, p2, i3, i4, descr=slicedescr) + jump(p3, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] + i0 = int_sub(i2, i1) # killed by the backend + i5 = int_sub(i4, i3) + i6 = int_add(i1, i3) + p3 = newstr(i5) + copystrcontent(p1, p3, 
i6, 0, i5) + jump(p3, i1, i2, i3, i4) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected) + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Fri Sep 24 09:54:51 2010 @@ -135,25 +135,6 @@ newunicode=0, unicodegetitem=0, unicodesetitem=0, unicodelen=0) - def test_slice_startonly(self): - if 1: # xxx unicode - jitdriver = JitDriver(greens = [], reds = ['m', 'total']) - def f(m): - total = 0 - while m >= 0: - jitdriver.can_enter_jit(m=m, total=total) - jitdriver.jit_merge_point(m=m, total=total) - string = 's0dgkwn349tXOGIEQR!'[m:] - c = string[2*m] - total += ord(c) - m -= 1 - return total - res = self.meta_interp(f, [6]) - assert res == sum(map(ord, 'sgn9OE!')) - py.test.xfail() - self.check_loops(call=0, call_pure=0, - newstr=0, strgetitem=1, strsetitem=0, strlen=0) - def test_strconcat_pure(self): for somestr in ["abc", ]: #u"def"]: jitdriver = JitDriver(greens = [], reds = ['m', 'n']) From arigo at codespeak.net Fri Sep 24 09:59:28 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 09:59:28 +0200 (CEST) Subject: [pypy-svn] r77326 - pypy/branch/jit-str/pypy/jit/metainterp/test Message-ID: <20100924075928.06F90282B90@codespeak.net> Author: arigo Date: Fri Sep 24 09:59:27 2010 New Revision: 77326 Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Log: The next test passes (amazingly). Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 24 09:59:27 2010 @@ -4063,6 +4063,27 @@ """ self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected) + def test_str_slice_concat(self): + ops = """ + [p1, i1, i2, p2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + p4 = call(0, p3, p2, descr=strconcatdescr) + jump(p4, i1, i2, p2) + """ + expected = """ + [p1, i1, i2, p2] + i3 = int_sub(i2, i1) # length of p3 + i4 = strlen(p2) + i5 = int_add(i3, i4) + p4 = newstr(i5) + copystrcontent(p1, p4, i1, 0, i3) + i4b = strlen(p2) + i6 = int_add(i3, i4b) # killed by the backend + copystrcontent(p2, p4, 0, i3, i4b) + jump(p4, i1, i2, p2) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): From arigo at codespeak.net Fri Sep 24 10:20:35 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 10:20:35 +0200 (CEST) Subject: [pypy-svn] r77327 - in pypy/branch/jit-str/pypy/jit/metainterp: . optimizeopt test Message-ID: <20100924082035.E5E77282B90@codespeak.net> Author: arigo Date: Fri Sep 24 10:20:34 2010 New Revision: 77327 Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Log: Resume data for string slices. 
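(Schematically, and leaving out the box/decoder machinery of the real code
below: when a guard fails, a string that was only a virtual slice inside the
loop is rebuilt from its three recorded fields.)

    def rebuild_virtual_slice(decode, strnum, startnum, lengthnum):
        s = decode(strnum)           # the source string
        start = decode(startnum)
        length = decode(lengthnum)
        # the real code reaches the same result through the OS_STR_SLICE
        # helper, called with (s, start, start + length)
        return s[start:start + length]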
Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 10:20:34 2010 @@ -297,16 +297,17 @@ lengthbox) def get_args_for_fail(self, modifier): - xxx if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [value.get_key_box() for value in self._chars] - modifier.register_virtual_fields(self.keybox, charboxes) - for value in self._chars: - value.get_args_for_fail(modifier) + boxes = [self.vstr.get_key_box(), + self.vstart.get_key_box(), + self.vlength.get_key_box()] + modifier.register_virtual_fields(self.keybox, boxes) + self.vstr.get_args_for_fail(modifier) + self.vstart.get_args_for_fail(modifier) + self.vlength.get_args_for_fail(modifier) def _make_virtual(self, modifier): - xxx - return modifier.make_vstrplain() + return modifier.make_vstrslice() def default_string_copy_parts(srcvalue, newoperations, targetbox, offsetbox): Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Fri Sep 24 10:20:34 2010 @@ -261,6 +261,9 @@ def make_vstrconcat(self): return VStrConcatInfo() + def make_vstrslice(self): + return VStrSliceInfo() + def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) self.liveboxes[virtualbox] = tagged @@ -528,13 +531,33 @@ def setfields(self, decoder, string): # we do everything in allocate(); no risk of circular data structure # with strings. - pass + pass def debug_prints(self): debug_print("\tvstrconcatinfo") for i in self.fieldnums: debug_print("\t\t", str(untag(i))) + +class VStrSliceInfo(AbstractVirtualInfo): + """Stands for the string made out of slicing another string.""" + + @specialize.argtype(1) + def allocate(self, decoder): + str, start, length = self.fieldnums + return decoder.slice_string(str, start, length) + + @specialize.argtype(1) + def setfields(self, decoder, string): + # we do everything in allocate(); no risk of circular data structure + # with strings. 
+ pass + + def debug_prints(self): + debug_print("\tvstrsliceinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + # ____________________________________________________________ class AbstractResumeDataReader(object): @@ -682,6 +705,16 @@ return self.metainterp.execute_and_record_varargs( rop.CALL, [ConstInt(func), str1box, str2box], calldescr) + def slice_string(self, strnum, startnum, lengthnum): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_SLICE) + strbox = self.decode_box(strnum, REF) + startbox = self.decode_box(startnum, INT) + lengthbox = self.decode_box(lengthnum, INT) + stopbox = self.metainterp.execute_and_record(rop.INT_ADD, None, + startbox, lengthbox) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), strbox, startbox, stopbox], calldescr) + def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): kind = REF @@ -911,6 +944,15 @@ result = funcptr(str1, str2) return lltype.cast_opaque_ptr(llmemory.GCREF, result) + def slice_string(self, strnum, startnum, lengthnum): + str = self.decode_ref(strnum) + start = self.decode_int(startnum) + length = self.decode_int(lengthnum) + str = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_SLICE) + result = funcptr(str, start, start + length) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + def setfield(self, descr, struct, fieldnum): if descr.is_pointer_field(): newvalue = self.decode_ref(fieldnum) Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Fri Sep 24 10:20:34 2010 @@ -263,6 +263,24 @@ return 42 self.meta_interp(f, [6, 10]) + def test_strslice(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = "foobarbazetc"[m:n] + if m <= 5: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [10, 10]) + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" From arigo at codespeak.net Fri Sep 24 11:04:14 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 11:04:14 +0200 (CEST) Subject: [pypy-svn] r77328 - in pypy/branch/jit-str/pypy/jit: backend/llgraph metainterp/optimizeopt metainterp/test Message-ID: <20100924090414.4B927282C25@codespeak.net> Author: arigo Date: Fri Sep 24 11:04:11 2010 New Revision: 77328 Modified: pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Log: Try to recognize that constant strings of length 1 are actually characters. 
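(The enabling trick, sketched in plain Python: short copies out of a constant
string are unrolled into STRGETITEM/STRSETITEM pairs, and a STRGETITEM on a
constant string at a constant index folds to the character code itself, so
concatenating a one-character constant boils down to a single strsetitem.)

    def fold_strgetitem(s, i):
        # with both the string and the index known at compile time, the
        # optimizer can emit ConstInt(ord(s[i])) instead of a real STRGETITEM
        return ord(s[i])

    assert fold_strgetitem("x", 0) == 120   # the constant seen in the new test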
Modified: pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jit-str/pypy/jit/backend/llgraph/llimpl.py Fri Sep 24 11:04:11 2010 @@ -1385,11 +1385,15 @@ def do_copystrcontent(src, dst, srcstart, dststart, length): src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) + assert 0 <= srcstart <= srcstart + length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) rstr.copy_string_contents(src, dst, srcstart, dststart, length) def do_copyunicodecontent(src, dst, srcstart, dststart, length): src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) + assert 0 <= srcstart <= srcstart + length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) # ---------- call ---------- Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 24 11:04:11 2010 @@ -9,7 +9,7 @@ from pypy.jit.metainterp.optimizeutil import InvalidLoop, args_dict from pypy.jit.metainterp import resume, compile from pypy.jit.metainterp.typesystem import llhelper, oohelper -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rstr from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded @@ -127,10 +127,16 @@ raise NotImplementedError def getstrlen(self, newoperations): - box = self.force_box() - lengthbox = BoxInt() - newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) - return lengthbox + if self.is_constant(): + s = self.box.getref(lltype.Ptr(rstr.STR)) + length = len(s.chars) + return ConstInt(length) + else: + self.ensure_nonnull() + box = self.force_box() + lengthbox = BoxInt() + newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) + return lengthbox def string_copy_parts(self, *args): from pypy.jit.metainterp.optimizeopt import virtualize Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 11:04:11 2010 @@ -314,20 +314,35 @@ # Copies the pointer-to-string 'srcvalue' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. 
+ lengthbox = srcvalue.getstrlen(newoperations) srcbox = srcvalue.force_box() - lengthbox = BoxInt() - newoperations.append(ResOperation(rop.STRLEN, [srcbox], lengthbox)) return copy_str_content(newoperations, srcbox, targetbox, CONST_0, offsetbox, lengthbox) def copy_str_content(newoperations, srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox): - nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) - newoperations.append(ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, - srcoffsetbox, - offsetbox, - lengthbox], None)) - return nextoffsetbox + if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): + M = 5 + else: + M = 2 + if isinstance(lengthbox, ConstInt) and lengthbox.value <= M: + # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM + # instead of just a COPYSTRCONTENT. + for i in range(lengthbox.value): + charbox = _strgetitem(newoperations, srcbox, srcoffsetbox) + srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) + newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) + offsetbox = _int_add(newoperations, offsetbox, CONST_1) + else: + nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + op = ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, + srcoffsetbox, offsetbox, + lengthbox], None) + newoperations.append(op) + offsetbox = nextoffsetbox + return offsetbox def _int_add(newoperations, box1, box2): if isinstance(box1, ConstInt): @@ -351,6 +366,16 @@ newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox +def _strgetitem(newoperations, strbox, indexbox): + # hum, this repetition of the operations is not quite right + if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): + s = strbox.getref(lltype.Ptr(rstr.STR)) + return ConstInt(ord(s.chars[indexbox.getint()])) + resbox = BoxInt() + newoperations.append(ResOperation(rop.STRGETITEM, [strbox, indexbox], + resbox)) + return resbox + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): @@ -661,12 +686,8 @@ def optimize_STRLEN(self, op): value = self.getvalue(op.args[0]) - if isinstance(value, VStringPlainValue): # even if no longer virtual - lengthbox = value.getstrlen(self.optimizer.newoperations) - self.make_equal_to(op.result, self.getvalue(lengthbox)) - else: - value.ensure_nonnull() - self.emit_operation(op) + lengthbox = value.getstrlen(self.optimizer.newoperations) + self.make_equal_to(op.result, self.getvalue(lengthbox)) def opt_call_oopspec_STR_CONCAT(self, op): vleft = self.getvalue(op.args[1]) Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Fri Sep 24 11:04:11 2010 @@ -1,6 +1,6 @@ import py, random -from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -121,6 +121,11 @@ EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT)) slicedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE)) + strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL)) + + 
mystr1 = lltype.cast_opaque_ptr(llmemory.GCREF, + rstr.string_repr.convert_const("x")) class LoopToken(AbstractDescr): pass Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 24 11:04:11 2010 @@ -4016,6 +4016,25 @@ """ self.optimize_loop(ops, 'Not, Not, Not', expected) + def test_str_concat_str_cstr1(self): + ops = """ + [p2] + p3 = call(0, p2, ConstPtr(mystr1), descr=strconcatdescr) + jump(p3) + """ + expected = """ + [p2] + i2 = strlen(p2) + i3 = int_add(i2, 1) + p3 = newstr(i3) + i4 = strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, 120) # == ord('x') == ord(mystr1) + i5 = int_add(i4, 1) # will be killed by the backend + jump(p3) + """ + self.optimize_loop(ops, 'Not', expected) + def test_str_slice_1(self): ops = """ [p1, i1, i2] From afa at codespeak.net Fri Sep 24 11:10:26 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 11:10:26 +0200 (CEST) Subject: [pypy-svn] r77329 - pypy/branch/fast-forward/lib_pypy Message-ID: <20100924091026.DD428282C25@codespeak.net> Author: afa Date: Fri Sep 24 11:10:25 2010 New Revision: 77329 Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py Log: Break circular import: ctypes.util -> tempfile -> random -> hashlib -> _hashlib -> ctypes.util Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/hashlib.py (original) +++ pypy/branch/fast-forward/lib_pypy/hashlib.py Fri Sep 24 11:10:25 2010 @@ -50,10 +50,24 @@ 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' """ -try: - import _hashlib -except ImportError: - _hashlib = None + +# Don't import _hashlib now: our implementation +# uses ctypes.util, which itself somehow import hashlib again... +def __import_hashlib(__memo=[]): + "Cache the result of the import, module or failure" + if __memo: + _hashlib = __memo[0] + else: + try: + import _hashlib + except ImportError: + _hashlib = None + __memo.append(_hashlib) + + if _hashlib: + return _hashlib + else: + raise ImportError("_hashlib") def __get_builtin_constructor(name): if name in ('SHA1', 'sha1'): @@ -81,9 +95,9 @@ optionally initialized with a string. """ try: - if _hashlib: - return _hashlib.new(name, string) - except ValueError: + _hashlib = __import_hashlib() + return _hashlib.new(name, string) + except (ValueError, ImportError): # If the _hashlib module (OpenSSL) doesn't support the named # hash, try using our builtin implementations. # This allows for SHA224/256 and SHA384/512 support even though @@ -94,35 +108,14 @@ new = __hash_new -def _setfuncs(): - # use the wrapper of the C implementation - - sslprefix = 'openssl_' - for opensslfuncname, func in vars(_hashlib).items(): - if not opensslfuncname.startswith(sslprefix): - continue - funcname = opensslfuncname[len(sslprefix):] - try: - # try them all, some may not work due to the OpenSSL - # version not supporting that algorithm. 
- func() - # Use the C function directly (very fast) - globals()[funcname] = func - except ValueError: - try: - # Use the builtin implementation directly (fast) - globals()[funcname] = __get_builtin_constructor(funcname) - except ValueError: - # this one has no builtin implementation, don't define it - pass - -if _hashlib: - _setfuncs() -else: - # lookup the C function to use directly for the named constructors - md5 = __get_builtin_constructor('md5') - sha1 = __get_builtin_constructor('sha1') - sha224 = __get_builtin_constructor('sha224') - sha256 = __get_builtin_constructor('sha256') - sha384 = __get_builtin_constructor('sha384') - sha512 = __get_builtin_constructor('sha512') +def __getfunc(name): + def new(string=''): + return __hash_new(name, string) + return new + +md5 = __getfunc('md5') +sha1 = __getfunc('sha1') +sha224 = __getfunc('sha224') +sha256 = __getfunc('sha256') +sha384 = __getfunc('sha384') +sha512 = __getfunc('sha512') From arigo at codespeak.net Fri Sep 24 11:24:01 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 11:24:01 +0200 (CEST) Subject: [pypy-svn] r77330 - pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt Message-ID: <20100924092401.2EDDE282C25@codespeak.net> Author: arigo Date: Fri Sep 24 11:23:59 2010 New Revision: 77330 Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Log: Tweaks. Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 11:23:59 2010 @@ -579,7 +579,7 @@ descr=op.descr) self.make_varray(op.descr, sizebox.getint(), op.result, op) else: - ###self.optimize_default(op) + self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): @@ -661,6 +661,7 @@ vvalue = self.make_vstring_plain(op.result, op) vvalue.setup(length_box.getint()) else: + self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_STRSETITEM(self, op): @@ -692,6 +693,8 @@ def opt_call_oopspec_STR_CONCAT(self, op): vleft = self.getvalue(op.args[1]) vright = self.getvalue(op.args[2]) + vleft.ensure_nonnull() + vright.ensure_nonnull() newoperations = self.optimizer.newoperations len1box = vleft.getstrlen(newoperations) len2box = vright.getstrlen(newoperations) @@ -705,6 +708,7 @@ vstr = self.getvalue(op.args[1]) vstart = self.getvalue(op.args[2]) vstop = self.getvalue(op.args[3]) + vstr.ensure_nonnull() lengthbox = _int_sub(newoperations, vstop.force_box(), vstart.force_box()) value = self.make_vstring_slice(op.result, op) From arigo at codespeak.net Fri Sep 24 12:07:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 12:07:00 +0200 (CEST) Subject: [pypy-svn] r77331 - in pypy/branch/jit-str/pypy/jit/metainterp: . 
optimizeopt test Message-ID: <20100924100700.C397F282BD6@codespeak.net> Author: arigo Date: Fri Sep 24 12:06:58 2010 New Revision: 77331 Modified: pypy/branch/jit-str/pypy/jit/metainterp/history.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/test/oparser.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Log: Use prebuilt constant strings instead of generating NEWSTR and a sequence of operations to build a constant string at runtime. Modified: pypy/branch/jit-str/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/history.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/history.py Fri Sep 24 12:06:58 2010 @@ -685,6 +685,19 @@ return llmemory.cast_adr_to_int(adr, "emulated") return i +def get_const_ptr_for_string(s): + from pypy.rpython.annlowlevel import llstr + if not we_are_translated(): + try: + return _const_ptr_for_string[s] + except KeyError: + pass + result = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, llstr(s))) + if not we_are_translated(): + _const_ptr_for_string[s] = result + return result +_const_ptr_for_string = {} + # ____________________________________________________________ # The TreeLoop class contains a loop or a generalized loop, i.e. a tree Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 24 12:06:58 2010 @@ -12,6 +12,7 @@ from pypy.rpython.lltypesystem import lltype, rstr from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.rpython import annlowlevel LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -127,10 +128,9 @@ raise NotImplementedError def getstrlen(self, newoperations): - if self.is_constant(): - s = self.box.getref(lltype.Ptr(rstr.STR)) - length = len(s.chars) - return ConstInt(length) + s = self.get_constant_string() + if s is not None: + return ConstInt(len(s)) else: self.ensure_nonnull() box = self.force_box() @@ -138,6 +138,13 @@ newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) return lengthbox + def get_constant_string(self): + if self.is_constant(): + s = self.box.getref(lltype.Ptr(rstr.STR)) + return annlowlevel.hlstr(s) + else: + return None + def string_copy_parts(self, *args): from pypy.jit.metainterp.optimizeopt import virtualize return virtualize.default_string_copy_parts(self, *args) @@ -150,6 +157,7 @@ CONST_1 = ConstInt(1) CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(ConstFloat(0.0)) +CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 12:06:58 2010 @@ -5,6 +5,7 @@ from pypy.jit.metainterp.specnode 
import VirtualStructSpecNode from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall +from pypy.jit.metainterp.history import get_const_ptr_for_string from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.codewriter.effectinfo import EffectInfo @@ -199,6 +200,11 @@ class VAbstractStringValue(AbstractVirtualValue): def _really_force(self): + s = self.get_constant_string() + if s is not None: + c_s = get_const_ptr_for_string(s) + self.make_constant(c_s) + return assert self.source_op is not None self.box = box = self.source_op.result newoperations = self.optimizer.newoperations @@ -212,7 +218,7 @@ _lengthbox = None # cache only def setup(self, size): - self._chars = [CVAL_ZERO] * size + self._chars = [CVAL_UNINITIALIZED_ZERO] * size def getstrlen(self, _): if self._lengthbox is None: @@ -226,6 +232,12 @@ assert isinstance(charvalue, OptValue) self._chars[index] = charvalue + def get_constant_string(self): + for c in self._chars: + if c is CVAL_UNINITIALIZED_ZERO or not c.is_constant(): + return None + return ''.join([chr(c.box.getint()) for c in self._chars]) + def string_copy_parts(self, newoperations, targetbox, offsetbox): for i in range(len(self._chars)): charbox = self._chars[i].force_box() @@ -257,6 +269,15 @@ def getstrlen(self, _): return self.lengthbox + def get_constant_string(self): + s1 = self.left.get_constant_string() + if s1 is None: + return None + s2 = self.right.get_constant_string() + if s2 is None: + return None + return s1 + s2 + def string_copy_parts(self, newoperations, targetbox, offsetbox): offsetbox = self.left.string_copy_parts(newoperations, targetbox, offsetbox) @@ -289,6 +310,16 @@ def getstrlen(self, newoperations): return self.vlength.force_box() + def get_constant_string(self): + if self.vstart.is_constant() and self.vlength.is_constant(): + s1 = self.vstr.get_constant_string() + if s1 is None: + return None + start = self.vstart.box.getint() + length = self.vlength.box.getint() + return s1[start : start + length] + return None + def string_copy_parts(self, newoperations, targetbox, offsetbox): lengthbox = self.getstrlen(newoperations) return copy_str_content(newoperations, Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/oparser.py Fri Sep 24 12:06:58 2010 @@ -5,13 +5,12 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ - LoopToken + LoopToken, get_const_ptr_for_string from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype -from pypy.rpython.annlowlevel import llstr class ParseError(Exception): pass @@ -145,8 +144,7 @@ if arg.startswith('"') or arg.startswith("'"): # XXX ootype info = arg.strip("'\"") - return ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, - llstr(info))) + return get_const_ptr_for_string(info) if arg.startswith('ConstClass('): name = arg[len('ConstClass('):-1] return self.get_const(name, 'class') Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py 
============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Fri Sep 24 12:06:58 2010 @@ -124,9 +124,6 @@ strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL)) - mystr1 = lltype.cast_opaque_ptr(llmemory.GCREF, - rstr.string_repr.convert_const("x")) - class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Fri Sep 24 12:06:58 2010 @@ -4019,7 +4019,7 @@ def test_str_concat_str_cstr1(self): ops = """ [p2] - p3 = call(0, p2, ConstPtr(mystr1), descr=strconcatdescr) + p3 = call(0, p2, "x", descr=strconcatdescr) jump(p3) """ expected = """ @@ -4029,12 +4029,28 @@ p3 = newstr(i3) i4 = strlen(p2) copystrcontent(p2, p3, 0, 0, i4) - strsetitem(p3, i4, 120) # == ord('x') == ord(mystr1) + strsetitem(p3, i4, 120) # == ord('x') i5 = int_add(i4, 1) # will be killed by the backend jump(p3) """ self.optimize_loop(ops, 'Not', expected) + def test_str_concat_consts(self): + ops = """ + [] + p1 = same_as("ab") + p2 = same_as("cde") + p3 = call(0, p1, p2, descr=strconcatdescr) + escape(p3) + jump() + """ + expected = """ + [] + escape("abcde") + jump() + """ + self.optimize_loop(ops, '', expected) + def test_str_slice_1(self): ops = """ [p1, i1, i2] From afa at codespeak.net Fri Sep 24 13:10:01 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 13:10:01 +0200 (CEST) Subject: [pypy-svn] r77332 - pypy/branch/fast-forward/pypy/module/_socket Message-ID: <20100924111001.4E0C1282BD4@codespeak.net> Author: afa Date: Fri Sep 24 13:09:59 2010 New Revision: 77332 Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py Log: Translation fix for socket.ioctl (Windows only) Modified: pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py (original) +++ pypy/branch/fast-forward/pypy/module/_socket/interp_socket.py Fri Sep 24 13:09:59 2010 @@ -347,37 +347,34 @@ recv_ptr = lltype.malloc(rwin32.LPDWORD.TO, 1, flavor='raw') try: if cmd == _c.SIO_RCVALL: - option_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - try: - option_ptr[0] = space.uint_w(w_option) - option_ptr = rffi.cast(rffi.VOIDP, option_ptr) - res = _c.WSAIoctl( - self.fd, cmd, - option_ptr, rffi.sizeof(rffi.INTP), - rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) - if res < 0: - raise error() - finally: - lltype.free(option_ptr, flavor='raw') + value_size = rffi.sizeof(rffi.INTP) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) - option_ptr = lltype.malloc(_c.tcp_keepalive, flavor='raw') - try: - option_ptr.c_onoff = space.uint_w(w_onoff) - option_ptr.c_keepalivetime = space.uint_w(w_time) - option_ptr.c_keepaliveinterval = space.uint_w(w_interval) - option_ptr = rffi.cast(rffi.VOIDP, option_ptr) - res = _c.WSAIoctl( - self.fd, cmd, - option_ptr, rffi.sizeof(_c.tcp_keepalive), - rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) - if res < 
0: - raise error() - finally: - lltype.free(option_ptr, flavor='raw') + value_size = rffi.sizeof(_c.tcp_keepalive) else: raise operationerrfmt(space.w_ValueError, "invalid ioctl command %d", cmd) + + value_ptr = lltype.malloc(rffi.VOIDP.TO, value_size, flavor='raw') + try: + if cmd == _c.SIO_RCVALL: + option_ptr = rffi.cast(rffi.INTP, value_ptr) + option_ptr[0] = space.int_w(w_option) + elif cmd == _c.SIO_KEEPALIVE_VALS: + w_onoff, w_time, w_interval = space.unpackiterable(w_option) + option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) + option_ptr.c_onoff = space.uint_w(w_onoff) + option_ptr.c_keepalivetime = space.uint_w(w_time) + option_ptr.c_keepaliveinterval = space.uint_w(w_interval) + + res = _c.WSAIoctl( + self.fd, cmd, value_ptr, value_size, + rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) + if res < 0: + raise converted_error(space, rsocket.last_error()) + finally: + if value_ptr: + lltype.free(value_ptr, flavor='raw') + return space.wrap(recv_ptr[0]) finally: lltype.free(recv_ptr, flavor='raw') From afa at codespeak.net Fri Sep 24 13:13:56 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 13:13:56 +0200 (CEST) Subject: [pypy-svn] r77333 - pypy/branch/fast-forward/pypy/module/sys Message-ID: <20100924111356.66938282BD4@codespeak.net> Author: afa Date: Fri Sep 24 13:13:54 2010 New Revision: 77333 Modified: pypy/branch/fast-forward/pypy/module/sys/__init__.py Log: On Windows, add a dummy sys.dllhandle. This allows ctypes to define pythonapi() and import without failing. All usages will fail though. I'm sure it's possible to use cpyext here. Modified: pypy/branch/fast-forward/pypy/module/sys/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/sys/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/sys/__init__.py Fri Sep 24 13:13:54 2010 @@ -80,6 +80,7 @@ } if sys.platform == 'win32': + interpleveldefs['dllhandle'] = 'space.wrap(0)' # XXX for the moment interpleveldefs['winver'] = 'version.get_winver(space)' interpleveldefs['getwindowsversion'] = 'vm.getwindowsversion' From afa at codespeak.net Fri Sep 24 13:14:25 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 13:14:25 +0200 (CEST) Subject: [pypy-svn] r77334 - in pypy/branch/fast-forward/pypy/module/_ast: . test Message-ID: <20100924111425.2930F282BD4@codespeak.net> Author: afa Date: Fri Sep 24 13:14:23 2010 New Revision: 77334 Modified: pypy/branch/fast-forward/pypy/module/_ast/__init__.py pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py Log: Add _ast.__version__; this fixes "import ast" Modified: pypy/branch/fast-forward/pypy/module/_ast/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ast/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_ast/__init__.py Fri Sep 24 13:14:23 2010 @@ -5,7 +5,8 @@ class Module(MixedModule): interpleveldefs = { - "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST + "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST, + "__version__" : "space.wrap('82160')", # from CPython's svn. 
} appleveldefs = {} Modified: pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py (original) +++ pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py Fri Sep 24 13:14:23 2010 @@ -15,6 +15,10 @@ return mod return get_ast""") + def test_module(self): + ast = self.ast + assert isinstance(ast.__version__, str) + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") From arigo at codespeak.net Fri Sep 24 13:23:21 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 13:23:21 +0200 (CEST) Subject: [pypy-svn] r77335 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100924112321.2004A282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 13:23:19 2010 New Revision: 77335 Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Log: A fix for ctypes returning a long instead of a signed integer in this operation. Found by what I'm going to check in next. Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Fri Sep 24 13:23:19 2010 @@ -375,7 +375,7 @@ "Returns the storage address as an int" if self._storage is None or self._storage is True: raise ValueError("Not a ctypes allocated structure") - return ctypes.cast(self._storage, ctypes.c_void_p).value + return intmask(ctypes.cast(self._storage, ctypes.c_void_p).value) def _free(self): self._check() # no double-frees From arigo at codespeak.net Fri Sep 24 13:29:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 13:29:52 +0200 (CEST) Subject: [pypy-svn] r77336 - in pypy/trunk/pypy: jit/backend/x86 rlib rpython/lltypesystem rpython/lltypesystem/test Message-ID: <20100924112952.B927F282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 13:29:49 2010 New Revision: 77336 Modified: pypy/trunk/pypy/jit/backend/x86/codebuf.py pypy/trunk/pypy/rlib/rmmap.py pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Log: Add a way to force (manually) ll2ctypes to allocate objects "far apart" from each other. To be used to detect some class of issues with the 64-bit JIT. 
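The trick can be sketched in ordinary CPython terms: reserve one large anonymous mmap() region up front and hand out ctypes objects from offsets inside it, so that object addresses stop clustering in the low part of the address space. The sketch below is only an illustration of the idea, not the patch itself: allocate_far and Node are made-up names, and the real code goes through pypy.rlib.rmmap and splits the reservation into several pieces of up to 2GB each.

    import ctypes, mmap, random

    _region = mmap.mmap(-1, 16 * 1024 * 1024)   # stands in for the rmmap reservation
    _pieces = [[0, len(_region)]]               # [start, stop) ranges still free

    def allocate_far(ctype):
        # place an instance of 'ctype' at the current offset of a random piece
        piece = random.choice(_pieces)
        start, stop = piece
        size = ((ctypes.sizeof(ctype) or 1) + 7) & ~7   # keep 8-byte alignment
        if start + size > stop:
            raise MemoryError("reserved region exhausted")
        piece[0] = start + size
        return ctype.from_buffer(_region, start)

    class Node(ctypes.Structure):               # hypothetical test structure
        _fields_ = [("value", ctypes.c_long)]

    n = allocate_far(Node)
    print(hex(ctypes.addressof(n)))             # lands inside the reserved mapping
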
Modified: pypy/trunk/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/trunk/pypy/jit/backend/x86/codebuf.py Fri Sep 24 13:29:49 2010 @@ -152,7 +152,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes from pypy.rlib.rmmap import hint - hint.pos += 0xFFFFFFFF + hint.pos += 0x80000000 - map_size self._init(data, map_size) Modified: pypy/trunk/pypy/rlib/rmmap.py ============================================================================== --- pypy/trunk/pypy/rlib/rmmap.py (original) +++ pypy/trunk/pypy/rlib/rmmap.py Fri Sep 24 13:29:49 2010 @@ -292,7 +292,8 @@ c_munmap(self.getptr(0), self.size) self.setdata(NODATA, 0) - __del__ = close + def __del__(self): + self.close() def unmapview(self): UnmapViewOfFile(self.getptr(0)) Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Fri Sep 24 13:29:49 2010 @@ -26,6 +26,60 @@ from pypy.translator.platform import platform from array import array +# ____________________________________________________________ + +far_regions = None + +def allocate_ctypes(ctype): + if far_regions: + import random + pieces = far_regions._ll2ctypes_pieces + num = random.randrange(len(pieces)) + i1, stop = pieces[num] + i2 = i1 + (ctypes.sizeof(ctype) + 7) & ~7 + if i2 > stop: + raise MemoryError("out of memory in far_regions") + pieces[num] = i2, stop + p = lltype2ctypes(far_regions.getptr(i1)) + return ctypes.cast(p, ctypes.POINTER(ctype)).contents + else: + return ctype() + +def do_allocation_in_far_regions(): + """On 32 bits: this reserves 1.25GB of address space, or 2.5GB on Linux, + which helps test this module for address values that are signed or + unsigned. + + On 64-bits: reserves 10 times 2GB of address space. This should help + to find 32-vs-64-bit issues in the JIT. It is likely that objects + are further apart than 32 bits can represent; it is also possible + to hit the corner case of being precisely e.g. 2GB - 8 bytes apart. + + Avoid this function if your OS reserves actual RAM from mmap() eagerly. 
+ """ + global far_regions + if not far_regions: + from pypy.rlib import rmmap + if sys.maxint > 0x7FFFFFFF: + PIECESIZE = 0x80000000 + else: + if sys.platform == 'linux': + PIECESIZE = 0x10000000 + else: + PIECESIZE = 0x08000000 + PIECES = 10 + m = rmmap.mmap(-1, PIECES * PIECESIZE, + rmmap.MAP_PRIVATE|rmmap.MAP_ANONYMOUS, + rmmap.PROT_READ|rmmap.PROT_WRITE) + m.close = lambda : None # leak instead of giving a spurious + # error at CPython's shutdown + m._ll2ctypes_pieces = [] + for i in range(PIECES): + m._ll2ctypes_pieces.append((i * PIECESIZE, (i+1) * PIECESIZE)) + far_regions = m + +# ____________________________________________________________ + _ctypes_cache = {} _eci_cache = {} @@ -87,13 +141,13 @@ if S._arrayfld is None: if n is not None: raise TypeError("%r is not variable-sized" % (S,)) - storage = cls() + storage = allocate_ctypes(cls) return storage else: if n is None: raise TypeError("%r is variable-sized" % (S,)) biggercls = build_ctypes_struct(S, None, n) - bigstruct = biggercls() + bigstruct = allocate_ctypes(biggercls) array = getattr(bigstruct, S._arrayfld) if hasattr(array, 'length'): array.length = n @@ -135,7 +189,7 @@ if not isinstance(n, int): raise TypeError, "array length must be an int" biggercls = get_ctypes_array_of_size(A, n) - bigarray = biggercls() + bigarray = allocate_ctypes(biggercls) if hasattr(bigarray, 'length'): bigarray.length = n return bigarray Modified: pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Fri Sep 24 13:29:49 2010 @@ -16,6 +16,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.rtyper import RPythonTyper + +if False: # for now, please keep it False by default + from pypy.rpython.lltypesystem import ll2ctypes + ll2ctypes.do_allocation_in_far_regions() + + class TestLL2Ctypes(object): def setup_method(self, meth): From arigo at codespeak.net Fri Sep 24 13:34:01 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 13:34:01 +0200 (CEST) Subject: [pypy-svn] r77337 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100924113401.C8BC8282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 13:33:59 2010 New Revision: 77337 Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Log: Fix: ctypes.sizeof() can return 0, e.g. for structs with no field. 
Modified: pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/ll2ctypes.py Fri Sep 24 13:33:59 2010 @@ -36,7 +36,7 @@ pieces = far_regions._ll2ctypes_pieces num = random.randrange(len(pieces)) i1, stop = pieces[num] - i2 = i1 + (ctypes.sizeof(ctype) + 7) & ~7 + i2 = i1 + ((ctypes.sizeof(ctype) or 1) + 7) & ~7 if i2 > stop: raise MemoryError("out of memory in far_regions") pieces[num] = i2, stop From afa at codespeak.net Fri Sep 24 13:36:25 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 13:36:25 +0200 (CEST) Subject: [pypy-svn] r77338 - in pypy/branch/fast-forward: lib-python pypy/tool/pytest/run-script Message-ID: <20100924113625.5B5DA282BD4@codespeak.net> Author: afa Date: Fri Sep 24 13:36:23 2010 New Revision: 77338 Modified: pypy/branch/fast-forward/lib-python/conftest.py pypy/branch/fast-forward/pypy/tool/pytest/run-script/regrverbose.py Log: Always run regression tests in verbose mode. py.test hides the output anyway while running, and will give more informative results in reports. Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Fri Sep 24 13:36:23 2010 @@ -613,10 +613,6 @@ 'run-script', 'regrverbose.py') regrrun = str(regr_script) - if pypy_option.verbose: - regrrun_verbosity = '1' - else: - regrrun_verbosity = '0' TIMEOUT = gettimeout() if option.pypy: @@ -634,9 +630,9 @@ py.test.skip("%s module not included in %s" % (mod, execpath)) - cmd = "%s %s %s %s" %( + cmd = "%s %s %s" %( execpath, - regrrun, regrrun_verbosity, fspath.purebasename) + regrrun, fspath.purebasename) # add watchdog for timing out cmd = "%s %s %s %s" %( Modified: pypy/branch/fast-forward/pypy/tool/pytest/run-script/regrverbose.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/pytest/run-script/regrverbose.py (original) +++ pypy/branch/fast-forward/pypy/tool/pytest/run-script/regrverbose.py Fri Sep 24 13:36:23 2010 @@ -2,8 +2,8 @@ import sys import unittest from test import test_support -test_support.verbose = int(sys.argv[1]) -sys.argv[:] = sys.argv[2:] +test_support.verbose = 1 +sys.argv[:] = sys.argv[1:] modname = sys.argv[0] impname = 'test.' + modname From arigo at codespeak.net Fri Sep 24 13:41:48 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 13:41:48 +0200 (CEST) Subject: [pypy-svn] r77339 - pypy/trunk/pypy/jit/backend/x86/test Message-ID: <20100924114148.4A870282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 13:41:44 2010 New Revision: 77339 Modified: pypy/trunk/pypy/jit/backend/x86/test/conftest.py Log: Indeed, adding the check here (for 64-bits only, for now) makes tests fail. 
Modified: pypy/trunk/pypy/jit/backend/x86/test/conftest.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/conftest.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/conftest.py Fri Sep 24 13:41:44 2010 @@ -5,3 +5,6 @@ def pytest_runtest_setup(item): if cpu not in ('x86', 'x86_64'): py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) + if cpu == 'x86_64': + from pypy.rpython.lltypesystem import ll2ctypes + ll2ctypes.do_allocation_in_far_regions() From arigo at codespeak.net Fri Sep 24 14:06:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 14:06:44 +0200 (CEST) Subject: [pypy-svn] r77340 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100924120644.EB526282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 14:06:41 2010 New Revision: 77340 Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py Log: A fix: loc_vtable might not fit in 32 bits. Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Fri Sep 24 14:06:41 2010 @@ -1028,7 +1028,7 @@ if self.cpu.vtable_offset is not None: assert isinstance(loc, RegLoc) assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value) + self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) @@ -1863,6 +1863,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, [chr(offset)]) + # on 64-bits, 'tid' is a value that fits in 31 bits self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) From arigo at codespeak.net Fri Sep 24 14:24:00 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 14:24:00 +0200 (CEST) Subject: [pypy-svn] r77341 - pypy/build/bot2/pypybuildbot Message-ID: <20100924122400.A8192282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 14:23:58 2010 New Revision: 77341 Modified: pypy/build/bot2/pypybuildbot/master.py Log: Fix: use platform='linux64' on the 64-bits JIT build. 
Otherwise, they get the same name as the linux32 build and the file in /nightly/trunk overwrite each other :-( Modified: pypy/build/bot2/pypybuildbot/master.py ============================================================================== --- pypy/build/bot2/pypybuildbot/master.py (original) +++ pypy/build/bot2/pypybuildbot/master.py Fri Sep 24 14:23:58 2010 @@ -128,6 +128,14 @@ pypyjit=True, app_tests=True, ) +pypyJITTranslatedTestFactory64 = pypybuilds.Translated( + translationArgs=jit_translation_args, + targetArgs=[], + lib_python=True, + pypyjit=True, + app_tests=True, + platform='linux64', + ) pypyJITTranslatedTestFactoryOSX = pypybuilds.Translated( platform='osx', @@ -277,7 +285,7 @@ {'name': JITLINUX64, 'slavenames': ['tannit64'], 'builddir': JITLINUX64, - 'factory': pypyJITTranslatedTestFactory, + 'factory': pypyJITTranslatedTestFactory64, 'category': 'jit', }, {"name" : JITMACOSX32, From arigo at codespeak.net Fri Sep 24 15:12:14 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 15:12:14 +0200 (CEST) Subject: [pypy-svn] r77342 - pypy/trunk/pypy/jit/backend/x86/test Message-ID: <20100924131214.AE72E36C545@codespeak.net> Author: arigo Date: Fri Sep 24 15:12:12 2010 New Revision: 77342 Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Log: Failing tests. Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Fri Sep 24 15:12:12 2010 @@ -78,3 +78,62 @@ '\x49\x8B\x4B\x08' ) assert cb.getvalue() == expected_instructions + +def test_64bit_address_1(): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # cmp rcx, [r11] + '\x49\x3B\x0B' + ) + assert cb.getvalue() == expected_instructions + +def test_64bit_address_2(): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov rcx, [r11+8*rdx] + '\x49\x8B\x0C\xD3' + ) + assert cb.getvalue() == expected_instructions + +def test_64bit_address_3(): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr)) + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov rcx, [rdx+r11] + '\x4A\x8B\x0C\x1A' + ) + assert cb.getvalue() == expected_instructions + +def test_64bit_address_4(): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.begin_reuse_scratch_register() + assert cb._reuse_scratch_register is True + assert cb._scratch_register_known is False + cb.MOV(ecx, AddressLoc(edx, esi, 2, base_addr)) + assert cb._reuse_scratch_register is True + assert cb._scratch_register_known is False + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov rcx, [r11+4*rsi] + '\x49\x8B\x0C\xB3' + ) + assert cb.getvalue() == expected_instructions From arigo at codespeak.net Fri Sep 24 15:39:38 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 15:39:38 +0200 (CEST) Subject: 
[pypy-svn] r77343 - in pypy/trunk/pypy/jit/backend/x86: . test Message-ID: <20100924133938.8BB68282BD4@codespeak.net> Author: arigo Date: Fri Sep 24 15:39:37 2010 New Revision: 77343 Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Log: Test and fix for rare cases that use unsupported 64-bit immediates. Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regloc.py Fri Sep 24 15:39:37 2010 @@ -137,6 +137,12 @@ self._location_code = 'a' self.loc_a = (base_loc.value, scaled_loc.value, scale, static_offset) + def __repr__(self): + dict = {'j': 'value', 'a': 'loc_a', 'm': 'loc_m', 'a':'loc_a'} + attr = dict.get(self._location_code, '?') + info = getattr(self, attr, '?') + return '' % (self._location_code, info) + def location_code(self): return self._location_code @@ -159,6 +165,9 @@ self.value = address self.const_id = const_id + def __repr__(self): + return '' % (self.value, self.const_id) + def _getregkey(self): # XXX: 1000 is kind of magic: We just don't want to be confused # with any registers @@ -248,6 +257,14 @@ methname = name + "_" + possible_code1 + "m" _rx86_getattr(self, methname)(val1, reg_offset) else: + if possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): + val1 = self._fix_static_offset_64_m(val1) + if possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): + val2 = self._fix_static_offset_64_m(val2) + if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]): + val1 = self._fix_static_offset_64_a(val1) + if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): + val2 = self._fix_static_offset_64_a(val2) methname = name + "_" + possible_code1 + possible_code2 _rx86_getattr(self, methname)(val1, val2) @@ -317,6 +334,34 @@ self.MOV_ri(X86_64_SCRATCH_REG.value, addr) return (X86_64_SCRATCH_REG.value, 0) + def _fix_static_offset_64_m(self, (basereg, static_offset)): + # For cases where an AddressLoc has the location_code 'm', but + # where the static offset does not fit in 32-bits. We have to fall + # back to the X86_64_SCRATCH_REG. It is even more annoying because + # we want to keep using the mode 'm'. These are all possibly rare + # cases; don't try to reuse a past value of the scratch register at + # all. + self._scratch_register_known = False + self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) + self.LEA_ra(X86_64_SCRATCH_REG.value, + (basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (X86_64_SCRATCH_REG.value, 0) + + def _fix_static_offset_64_a(self, (basereg, scalereg, + scale, static_offset)): + # For cases where an AddressLoc has the location_code 'a', but + # where the static offset does not fit in 32-bits. We have to fall + # back to the X86_64_SCRATCH_REG. In one case it is even more + # annoying. These are all possibly rare cases; don't try to reuse a + # past value of the scratch register at all. + self._scratch_register_known = False + self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) + # + if basereg != rx86.NO_BASE_REGISTER: + self.LEA_ra(X86_64_SCRATCH_REG.value, + (basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (X86_64_SCRATCH_REG.value, scalereg, scale, 0) + def begin_reuse_scratch_register(self): # Flag the beginning of a block where it is okay to reuse the value # of the scratch register. 
In theory we shouldn't have to do this if Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Fri Sep 24 15:39:37 2010 @@ -58,82 +58,91 @@ expected_ofs = pos_addr - (neg_addr+5) assert s.getvalue() == '\xE9' + struct.pack(" Author: antocuni Date: Fri Sep 24 16:26:39 2010 New Revision: 77344 Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py pypy/trunk/pypy/jit/metainterp/test/test_oparser.py pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: fix test_pypy_c, which was broken by the merging of the resoperation-refactoring branch Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/oparser.py Fri Sep 24 16:26:39 2010 @@ -196,7 +196,8 @@ if opname == 'debug_merge_point': allargs = [argspec] else: - allargs = argspec.split(",") + allargs = [arg for arg in argspec.split(",") + if arg != ''] poss_descr = allargs[-1].strip() if poss_descr.startswith('descr='): Modified: pypy/trunk/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_oparser.py Fri Sep 24 16:26:39 2010 @@ -174,3 +174,12 @@ def test_parse_no_namespace(): loop = parse(example_loop_log, no_namespace=True) + +def test_parse_new_with_comma(): + # this is generated by PYPYJITLOG, check that we can handle it + x = ''' + [] + p0 = new(, descr=) + ''' + loop = parse(x) + assert loop.operations[0].getopname() == 'new' Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Fri Sep 24 16:26:39 2010 @@ -140,7 +140,7 @@ for op in loop.operations: if op.getopname() == "debug_merge_point": sliced_loop = BytecodeTrace() - sliced_loop.bytecode = op.args[0]._get_str().rsplit(" ", 1)[1] + sliced_loop.bytecode = op.getarg(0)._get_str().rsplit(" ", 1)[1] self.sliced_loops.append(sliced_loop) else: sliced_loop.append(op) From afa at codespeak.net Fri Sep 24 17:02:15 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 17:02:15 +0200 (CEST) Subject: [pypy-svn] r77347 - in pypy/branch/fast-forward/pypy/rlib: rstruct test Message-ID: <20100924150215.CFF0C282B9E@codespeak.net> Author: afa Date: Fri Sep 24 17:02:12 2010 New Revision: 77347 Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Log: float('nan') is not RPython, use prebuilt constants instead. This should fix applevel tests with -A: rarithmetic.pyc contains NAN and INFINITY which could not be unmarshalled. 
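As the log says, float('nan') is not RPython, so the special values have to be built once at module level from ordinary float arithmetic; pypy.rlib.rarithmetic exposes them as the INFINITY and NAN constants used in the patch. A minimal sketch of the pattern (the exact definitions in rarithmetic may differ):

    # computed once at import time, so translated code only ever sees two
    # ready-made float constants
    INFINITY = 1e200 * 1e200      # overflows the double range, gives inf
    NAN = INFINITY / INFINITY     # inf / inf gives nan

    def unpack_special(mant):
        # same shape as the ieee.py change below
        return NAN if mant else INFINITY

    print(unpack_special(0))      # inf
    print(unpack_special(1))      # nan
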
Modified: pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstruct/ieee.py Fri Sep 24 17:02:12 2010 @@ -58,7 +58,7 @@ if exp == MAX_EXP - MIN_EXP + 2: # nan or infinity - result = float('nan') if mant else float('inf') + result = rarithmetic.NAN if mant else rarithmetic.INFINITY elif exp == 0: # subnormal or zero result = math.ldexp(mant, MIN_EXP - MANT_DIG) Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_rstruct.py Fri Sep 24 17:02:12 2010 @@ -2,7 +2,7 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rlib.rstruct.runpack import runpack from pypy.rlib.rstruct import ieee -from pypy.rlib.rarithmetic import LONG_BIT +from pypy.rlib.rarithmetic import LONG_BIT, INFINITY, NAN, isnan from pypy.translator.c.test.test_genc import compile import struct @@ -48,10 +48,18 @@ return ieee.unpack_float(s, False) c_unpack = compile(unpack, [str]) - s = c_pack(123.456) - assert s == pack(123.456) - assert c_unpack(s) == 123.456 - - s = c_pack(-123.456) - assert s == pack(-123.456) - assert c_unpack(s) == -123.456 + def check_roundtrip(x): + s = c_pack(x) + assert s == pack(x) + if not isnan(x): + assert unpack(s) == x + assert c_unpack(s) == x + else: + assert isnan(unpack(s)) + assert isnan(c_unpack(s)) + + check_roundtrip(123.456) + check_roundtrip(-123.456) + check_roundtrip(INFINITY) + check_roundtrip(NAN) + From afa at codespeak.net Fri Sep 24 17:10:09 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 17:10:09 +0200 (CEST) Subject: [pypy-svn] r77348 - pypy/branch/fast-forward/lib-python Message-ID: <20100924151009.33083282B9E@codespeak.net> Author: afa Date: Fri Sep 24 17:10:08 2010 New Revision: 77348 Added: pypy/branch/fast-forward/lib-python/TODO Log: Start a list of various independent tasks for 2.7.0. 190 tests files have failures. Volunteers needed! Added: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/lib-python/TODO Fri Sep 24 17:10:08 2010 @@ -0,0 +1,20 @@ +TODO list for 2.7.0 +=================== + +- Missing builtin: bytes = str + +- Missing builtin: next = space.next(w_obj) + +- Missing builtin: bytearray (possibly reuse module.__pypy__.bytebuffer) + +- Octal literals: 0o777 + +- Seen in test_inspect, this has never worked in pypy:: + + assert eval('a', None, dict(a=42)) == 42 + +- Implement the _io module. At least _io.FileIO, and have io.py import + everything from _pyio.py + +- Finish _multiprocessing + From arigo at codespeak.net Fri Sep 24 17:14:12 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 17:14:12 +0200 (CEST) Subject: [pypy-svn] r77349 - in pypy/trunk/pypy/jit/backend/x86: . test Message-ID: <20100924151412.E47B8282B9E@codespeak.net> Author: arigo Date: Fri Sep 24 17:14:11 2010 New Revision: 77349 Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Log: More cases found by test_zll_random. Rewrite a bit the logic that becomes huge. 
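All of these cases come down to one x86-64 encoding limit: apart from the single form "MOV reg64, imm64", immediates and address displacements are encoded in at most 32 bits (sign-extended), so any larger constant must first be loaded into the scratch register R11 and addressed through it. The cut-off used throughout the patch is rx86.fits_in_32bits(); a rough standalone equivalent of that predicate (the real one lives in pypy/jit/backend/x86/rx86.py and may be written differently):

    def fits_in_32bits(value):
        return -2147483648 <= value <= 2147483647

    print(fits_in_32bits(-0x01234567))            # True: can be emitted inline
    print(fits_in_32bits(0xFEDCBA9876543210))     # False: goes through R11 first
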
Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regloc.py Fri Sep 24 17:14:11 2010 @@ -33,6 +33,8 @@ def value_a(self): raise AssertionError("value_a undefined") def value_m(self): raise AssertionError("value_m undefined") + def find_unused_reg(self): return eax + class StackLoc(AssemblerLocation): _immutable_ = True def __init__(self, position, ebp_offset, num_words, type): @@ -88,6 +90,12 @@ def assembler(self): return '%' + repr(self) + def find_unused_reg(self): + if self.value == eax.value: + return edx + else: + return eax + class ImmedLoc(AssemblerLocation): _immutable_ = True width = WORD @@ -152,6 +160,21 @@ def value_m(self): return self.loc_m + def find_unused_reg(self): + if self._location_code == 'm': + if self.loc_m[0] == eax.value: + return edx + elif self._location_code == 'a': + if self.loc_a[0] == eax.value: + if self.loc_a[1] == edx.value: + return ecx + return edx + if self.loc_a[1] == eax.value: + if self.loc_a[0] == edx.value: + return ecx + return edx + return eax + class ConstFloatLoc(AssemblerLocation): # XXX: We have to use this class instead of just AddressLoc because # AddressLoc is "untyped" and also we to have need some sort of unique @@ -215,6 +238,32 @@ _scratch_register_value = 0 def _binaryop(name): + + def insn_with_64_bit_immediate(self, loc1, loc2): + # These are the worst cases: + val2 = loc2.value_i() + code1 = loc1.location_code() + if (code1 == 'j' + or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) + or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai + # and the constant offset in the address is 64-bit. + # Hopefully this doesn't happen too often + freereg = loc1.find_unused_reg() + self.PUSH_r(freereg.value) + self.MOV_ri(freereg.value, val2) + INSN(self, loc1, freereg) + self.POP_r(freereg.value) + else: + # For this case, we should not need the scratch register more than here. 
+ self._load_scratch(val2) + INSN(self, loc1, X86_64_SCRATCH_REG) + + def invoke(self, codes, val1, val2): + methname = name + "_" + codes + _rx86_getattr(self, methname)(val1, val2) + invoke._annspecialcase_ = 'specialize:arg(1)' + def INSN(self, loc1, loc2): code1 = loc1.location_code() code2 = loc2.location_code() @@ -227,46 +276,38 @@ if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"): assert code2 not in ('j', 'i') - for possible_code1 in unrolling_location_codes: - if code1 == possible_code1: - for possible_code2 in unrolling_location_codes: - if code2 == possible_code2: + for possible_code2 in unrolling_location_codes: + if code2 == possible_code2: + val2 = getattr(loc2, "value_" + possible_code2)() + # + # Fake out certain operations for x86_64 + if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2): + insn_with_64_bit_immediate(self, loc1, loc2) + return + # + # Regular case + for possible_code1 in unrolling_location_codes: + if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() - val2 = getattr(loc2, "value_" + possible_code2)() - # Fake out certain operations for x86_64 - if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2): - if possible_code1 == 'j': - # This is the worst case: INSN_ji, and both operands are 64-bit - # Hopefully this doesn't happen too often - self.PUSH_r(eax.value) - self.MOV_ri(eax.value, val1) - self.MOV_ri(X86_64_SCRATCH_REG.value, val2) - methname = name + "_mr" - _rx86_getattr(self, methname)((eax.value, 0), X86_64_SCRATCH_REG.value) - self.POP_r(eax.value) - else: - self.MOV_ri(X86_64_SCRATCH_REG.value, val2) - methname = name + "_" + possible_code1 + "r" - _rx86_getattr(self, methname)(val1, X86_64_SCRATCH_REG.value) - elif self.WORD == 8 and possible_code1 == 'j': - reg_offset = self._addr_as_reg_offset(val1) - methname = name + "_" + "m" + possible_code2 - _rx86_getattr(self, methname)(reg_offset, val2) + # More faking out of certain operations for x86_64 + if self.WORD == 8 and possible_code1 == 'j': + val1 = self._addr_as_reg_offset(val1) + invoke(self, "m" + possible_code2, val1, val2) elif self.WORD == 8 and possible_code2 == 'j': - reg_offset = self._addr_as_reg_offset(val2) - methname = name + "_" + possible_code1 + "m" - _rx86_getattr(self, methname)(val1, reg_offset) + val2 = self._addr_as_reg_offset(val2) + invoke(self, possible_code1 + "m", val1, val2) + elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): + val1 = self._fix_static_offset_64_m(val1) + invoke(self, "a" + possible_code2, val1, val2) + elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): + val2 = self._fix_static_offset_64_m(val2) + invoke(self, possible_code1 + "a", val1, val2) else: - if possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): - val1 = self._fix_static_offset_64_m(val1) - if possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): - val2 = self._fix_static_offset_64_m(val2) if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]): val1 = self._fix_static_offset_64_a(val1) if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): val2 = self._fix_static_offset_64_a(val2) - methname = name + "_" + possible_code1 + possible_code2 - _rx86_getattr(self, methname)(val1, val2) + invoke(self, possible_code1 + possible_code2, val1, val2) return func_with_new_name(INSN, "INSN_" + name) @@ -277,7 +318,7 @@ if code == possible_code: val = getattr(loc, "value_" + possible_code)() if self.WORD == 8 and possible_code == 'i' and not 
rx86.fits_in_32bits(val): - self.MOV_ri(X86_64_SCRATCH_REG.value, val) + self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) else: methname = name + "_" + possible_code @@ -297,7 +338,7 @@ _rx86_getattr(self, name + "_l")(val) else: assert self.WORD == 8 - self.MOV_ri(X86_64_SCRATCH_REG.value, val) + self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) else: methname = name + "_" + possible_code @@ -337,15 +378,12 @@ def _fix_static_offset_64_m(self, (basereg, static_offset)): # For cases where an AddressLoc has the location_code 'm', but # where the static offset does not fit in 32-bits. We have to fall - # back to the X86_64_SCRATCH_REG. It is even more annoying because - # we want to keep using the mode 'm'. These are all possibly rare - # cases; don't try to reuse a past value of the scratch register at - # all. + # back to the X86_64_SCRATCH_REG. Note that this returns a location + # encoded as mode 'a'. These are all possibly rare cases; don't try + # to reuse a past value of the scratch register at all. self._scratch_register_known = False self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) - self.LEA_ra(X86_64_SCRATCH_REG.value, - (basereg, X86_64_SCRATCH_REG.value, 0, 0)) - return (X86_64_SCRATCH_REG.value, 0) + return (basereg, X86_64_SCRATCH_REG.value, 0, 0) def _fix_static_offset_64_a(self, (basereg, scalereg, scale, static_offset)): @@ -362,6 +400,15 @@ (basereg, X86_64_SCRATCH_REG.value, 0, 0)) return (X86_64_SCRATCH_REG.value, scalereg, scale, 0) + def _load_scratch(self, value): + if (self._scratch_register_known + and value == self._scratch_register_value): + return + if self._reuse_scratch_register: + self._scratch_register_known = True + self._scratch_register_value = value + self.MOV_ri(X86_64_SCRATCH_REG.value, value) + def begin_reuse_scratch_register(self): # Flag the beginning of a block where it is okay to reuse the value # of the scratch register. 
In theory we shouldn't have to do this if Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_regloc.py Fri Sep 24 17:14:11 2010 @@ -83,10 +83,13 @@ ) assert cb.getvalue() == expected_instructions + # ------------------------------------------------------------ + def test_64bit_address_1(self): base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + # this case is a CMP_rj # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 @@ -100,6 +103,7 @@ base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + # this case is a CMP_ra # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 @@ -113,17 +117,13 @@ base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr)) + # this case is a CMP_rm # - # sub-efficient instruction generated in that particular case: - # the LEA is not really needed, but it's useful because we can - # keep the same mode 'm' for generating the final instruction expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # lea r11, [rdx+r11] - '\x4E\x8D\x1C\x1A' - # mov rcx, [r11] - '\x49\x8B\x0B' + # mov rcx, [rdx+r11] + '\x4A\x8B\x0C\x1A' ) assert cb.getvalue() == expected_instructions @@ -136,6 +136,7 @@ cb.MOV(ecx, AddressLoc(edx, esi, 2, base_addr)) assert cb._reuse_scratch_register is True assert cb._scratch_register_known is False + # this case is a CMP_ra # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 @@ -146,3 +147,161 @@ '\x49\x8B\x0C\xB3' ) assert cb.getvalue() == expected_instructions + + # ------------------------------------------------------------ + + def test_MOV_immed32_into_64bit_address_1(self): + immed = -0x01234567 + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr), + ImmedLoc(immed)) + # this case is a MOV_ji + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [r11], -0x01234567 + '\x49\xC7\x03\x99\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed32_into_64bit_address_2(self): + immed = -0x01234567 + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr), + ImmedLoc(immed)) + # this case is a MOV_ai + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [r11+8*rdx], -0x01234567 + '\x49\xC7\x04\xD3\x99\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed32_into_64bit_address_3(self): + immed = -0x01234567 + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(edx, ImmedLoc(0), 0, base_addr), + ImmedLoc(immed)) + # this case is a MOV_mi + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [rdx+r11], -0x01234567 + '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed32_into_64bit_address_4(self): + immed = -0x01234567 + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(edx, esi, 2, base_addr), ImmedLoc(immed)) + # 
this case is a MOV_ai + # + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov [r11+4*rsi], -0x01234567 + '\x49\xC7\x04\xB3\x99\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + # ------------------------------------------------------------ + + def test_MOV_immed64_into_64bit_address_1(self): + immed = 0x0123456789ABCDEF + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr), + ImmedLoc(immed)) + # this case is a MOV_ji + # + expected_instructions = ( + # push rax + '\x50' + # mov rax, 0x0123456789ABCDEF + '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [r11], rax + '\x49\x89\x03' + # pop rax + '\x58' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed64_into_64bit_address_2(self): + immed = 0x0123456789ABCDEF + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr), + ImmedLoc(immed)) + # this case is a MOV_ai + # + expected_instructions = ( + # push rax + '\x50' + # mov rax, 0x0123456789ABCDEF + '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [r11+8*rdx], rax + '\x49\x89\x04\xD3' + # pop rax + '\x58' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed64_into_64bit_address_3(self): + immed = 0x0123456789ABCDEF + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(eax, ImmedLoc(0), 0, base_addr), + ImmedLoc(immed)) + # this case is a MOV_mi + # + expected_instructions = ( + # push rdx + '\x52' + # mov rdx, 0x0123456789ABCDEF + '\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov [rax+r11], rdx + '\x4A\x89\x14\x18' + # pop rdx + '\x5A' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_immed64_into_64bit_address_4(self): + immed = 0x0123456789ABCDEF + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(AddressLoc(edx, eax, 2, base_addr), ImmedLoc(immed)) + # this case is a MOV_ai + # + expected_instructions = ( + # push rcx + '\x51' + # mov rcx, 0x0123456789ABCDEF + '\x48\xB9\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov [r11+4*rax], rcx + '\x49\x89\x0C\x83' + # pop rcx + '\x59' + ) + assert cb.getvalue() == expected_instructions From hakanardo at codespeak.net Fri Sep 24 17:16:01 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Fri, 24 Sep 2010 17:16:01 +0200 (CEST) Subject: [pypy-svn] r77350 - in pypy/branch/jit-loop-invaraints/pypy/jit/metainterp: optimizeopt test Message-ID: <20100924151601.8B538282B9E@codespeak.net> Author: hakanardo Date: Fri Sep 24 17:15:59 2010 New Revision: 77350 Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Log: Inlines the preamble, but still tries to call the preamble. Why? 
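What "inlining the preamble" amounts to in the patch below is a rename-and-replay of recorded operations: walk the preamble's operation list, substitute the caller's boxes for the preamble's input boxes, and emit the clones. A self-contained toy version of that substitution (Op here is a stand-in class, not PyPy's ResOperation):

    class Op(object):
        def __init__(self, name, args, result=None):
            self.name, self.args, self.result = name, args, result

    def inline(preamble_ops, preamble_inputargs, actual_args):
        argmap = dict(zip(preamble_inputargs, actual_args))
        out = []
        for op in preamble_ops:
            newargs = [argmap.get(a, a) for a in op.args]
            newresult = ('v%d' % len(out)) if op.result else None
            if op.result:
                argmap[op.result] = newresult
            out.append(Op(op.name, newargs, newresult))
        return out

    preamble = [Op('int_mul', ['x', 'x'], 'sq'), Op('int_add', ['sq', 'y'], 'r')]
    for op in inline(preamble, ['x', 'y'], ['i7', 'i9']):
        print('%s %s -> %s' % (op.name, op.args, op.result))
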
Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/__init__.py Fri Sep 24 17:15:59 2010 @@ -11,15 +11,18 @@ must be applicable to the loop; you will probably get an AssertionError if not. """ - optimizations = [OptIntBounds(), + optimizations = [OptInvariant(), + OptIntBounds(), OptRewrite(), OptVirtualize(), OptHeap(), - OptInvariant(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) optimizer.propagate_all_forward() + print + print + print loop.operations def optimize_bridge_1(metainterp_sd, bridge): """The same, but for a bridge. The only difference is that we don't Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Fri Sep 24 17:15:59 2010 @@ -1,5 +1,6 @@ from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.compile import prebuiltNotSpecNode class OptInvariant(Optimization): """Move loop invariant code into a preamble. @@ -8,45 +9,81 @@ if not virtuals: return - inputargs = self.optimizer.original_inputargs + #inputargs = self.optimizer.original_inputargs + inputargs = self.optimizer.loop.inputargs if not inputargs: return jump_op = self.optimizer.loop.operations[-1] - assert(jump_op.opnum == rop.JUMP) - #for arg_in, arg_out in zip(inputargs, jump_op.args): - print - print inputargs, jump_op.args + if jump_op.opnum != rop.JUMP: + return + for i in range(len(inputargs)): arg_in, arg_out = inputargs[i], jump_op.args[i] if arg_in is arg_out: - print "Invariant: ", arg_in v = self.getvalue(arg_in) v.invariant = True - self.invariant_boxes = [] - + + def invariant_boxes(self): + invariant_boxes = [] + for op in self.optimizer.preamble: + if self.get_constant_box(op.result) is None: + v = self.getvalue(op.result) + v.invariant = True + box = v.force_box() + if box and box not in invariant_boxes: + invariant_boxes.append(box) + return invariant_boxes + def propagate_forward(self, op): if op.opnum == rop.JUMP: loop = self.optimizer.loop if loop.preamble and len(self.optimizer.preamble)>0: + # First trace through loop, create preamble + self.emit_operation(op) preamble = loop.preamble preamble.inputargs = loop.inputargs[:] - loop.inputargs.extend(self.invariant_boxes) - op.args = op.args + self.invariant_boxes + + invariant_boxes = self.invariant_boxes() + loop.inputargs.extend(invariant_boxes) + op.args = op.args + invariant_boxes preamble.operations = self.optimizer.preamble - preamble.token.specnodes = loop.token.specnodes + preamble.token.specnodes = loop.token.specnodes + loop.token.specnodes = loop.token.specnodes + \ + [prebuiltNotSpecNode] * len(invariant_boxes) + + print + print loop.token.number + print len(loop.token.specnodes) jmp = ResOperation(rop.JUMP, loop.inputargs[:], None) jmp.descr = loop.token preamble.operations.append(jmp) + preamble.token.preamble = preamble + return + + elif op.descr.preamble: + # Bridge calling a loop with preamble, inline it + # + print + print "hi: ", op + print loop + print + 
self.inline(op.descr.preamble, op.args) + return elif (op.is_always_pure()):# or op.is_foldable_guard() or op.is_ovf()): if self.has_invariant_args(op): - self.emit_invariant(op) + op.invariant = True + self.emit_operation(op) + if self.get_constant_box(op.result) is None: + v = self.getvalue(op.result) + v.invariant = True return + #elif op.is_guard_overflow(): # prev_op = self.optimizer.loop.operations[self.optimizer.i - 1] # v = self.getvalue(prev_op.result) @@ -56,17 +93,6 @@ self.emit_operation(op) - def emit_invariant(self, op): - print "P: ", op, op.opnum - op.invariant = True - self.emit_operation(op) - if self.get_constant_box(op.result) is None: - v = self.getvalue(op.result) - v.invariant = True - box = v.force_box() - if box and box not in self.invariant_boxes: - self.invariant_boxes.append(box) - def has_invariant_args(self, op): for a in op.args: if self.get_constant_box(a) is None: @@ -76,4 +102,17 @@ if not v.invariant: return False return True + + def inline(self, loop, inputargs): + argmap = {} + for i in range(len(inputargs)): + argmap[loop.inputargs[i]] = inputargs[i] + for op in loop.operations: + newop = op.clone() + newop.args = [argmap[a] for a in op.args] + if op.result: + newop.result = op.result.clonebox() + argmap[op.result] = newop.result + self.emit_operation(newop) + Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py Fri Sep 24 17:15:59 2010 @@ -203,7 +203,7 @@ self.pendingfields = [] self.preamble = [] - self.original_inputargs = self.loop.inputargs + #self.original_inputargs = self.loop.inputargs if optimizations: self.first_optimization = optimizations[0] Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py Fri Sep 24 17:15:59 2010 @@ -284,6 +284,9 @@ def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] + #orgop.descr = op.descr + orgop = op + print 'Nbr: ', op.descr.number exitargs = [] target_loop_token = orgop.descr assert isinstance(target_loop_token, LoopToken) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Fri Sep 24 17:15:59 2010 @@ -303,7 +303,7 @@ found += 1 assert found == 1 - def test_loop_invariant_mul(self): + def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -340,6 +340,22 @@ 'int_mul': 1, 'jump': 2}) + def test_loop_invariant_mul_guard(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + if y<8: + x += 1 + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 265 + self.check_loop_count(3) + def test_loop_invariant_intbox(self): 
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: From arigo at codespeak.net Fri Sep 24 17:48:15 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 17:48:15 +0200 (CEST) Subject: [pypy-svn] r77351 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100924154815.A3881282B9E@codespeak.net> Author: arigo Date: Fri Sep 24 17:48:13 2010 New Revision: 77351 Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py Log: RPython fix. (Yes, yes.) Modified: pypy/trunk/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regloc.py Fri Sep 24 17:48:13 2010 @@ -308,6 +308,7 @@ if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): val2 = self._fix_static_offset_64_a(val2) invoke(self, possible_code1 + possible_code2, val1, val2) + return return func_with_new_name(INSN, "INSN_" + name) From afa at codespeak.net Fri Sep 24 18:07:57 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 18:07:57 +0200 (CEST) Subject: [pypy-svn] r77352 - in pypy/branch/fast-forward/pypy/interpreter/astcompiler: . test Message-ID: <20100924160757.3F5F4282B9E@codespeak.net> Author: afa Date: Fri Sep 24 18:07:55 2010 New Revision: 77352 Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py pypy/branch/fast-forward/pypy/interpreter/astcompiler/test/test_compiler.py Log: "a, b += 3" now raises a SyntaxError instead of crashing Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/codegen.py Fri Sep 24 18:07:55 2010 @@ -379,7 +379,7 @@ self.emit_op(self._op_for_augassign(assign.op)) self.name_op(target.id, ast.Store) else: - raise AssertionError("unknown augassign") + self.error("illegal expression for augmented assignment", assign) def visit_Assert(self, asrt): self.update_position(asrt.lineno) Modified: pypy/branch/fast-forward/pypy/interpreter/astcompiler/test/test_compiler.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/astcompiler/test/test_compiler.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/astcompiler/test/test_compiler.py Fri Sep 24 18:07:55 2010 @@ -704,6 +704,10 @@ source = "call(a, b, c) = 3" py.test.raises(SyntaxError, self.simple_test, source, None, None) + def test_augassig_to_sequence(self): + source = "a, b += 3" + py.test.raises(SyntaxError, self.simple_test, source, None, None) + def test_broken_setups(self): source = """if 1: try: From antocuni at codespeak.net Fri Sep 24 18:16:10 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Fri, 24 Sep 2010 18:16:10 +0200 (CEST) Subject: [pypy-svn] r77353 - pypy/trunk/pypy/module/pypyjit/test Message-ID: <20100924161610.298C9282BD4@codespeak.net> Author: antocuni Date: Fri Sep 24 18:16:08 2010 New Revision: 77353 Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Log: removee the "assert False" thatmakes the test failing. Why wass it there in the first place?!?! 
Modified: pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/trunk/pypy/module/pypyjit/test/test_pypy_c.py Fri Sep 24 18:16:08 2010 @@ -798,7 +798,6 @@ if i > 750: a = b return sa ''', 215, ([], 12481752)) - assert False def test_array_sum(self): for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): From afa at codespeak.net Fri Sep 24 18:16:59 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 18:16:59 +0200 (CEST) Subject: [pypy-svn] r77354 - pypy/trunk/pypy/translator/c/gcc Message-ID: <20100924161659.EC454282BD4@codespeak.net> Author: afa Date: Fri Sep 24 18:16:58 2010 New Revision: 77354 Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Log: Actually handle the xchgq instruction in the 64bit compiler Modified: pypy/trunk/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/trunk/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/trunk/pypy/translator/c/gcc/trackgcroot.py Fri Sep 24 18:16:58 2010 @@ -856,7 +856,6 @@ visit_and = FunctionGcRootTracker._visit_and visit_xchgl = FunctionGcRootTracker._visit_xchg - visit_xchgq = FunctionGcRootTracker._visit_xchg # used in "xor reg, reg" to create a NULL GC ptr visit_xorl = FunctionGcRootTracker.binary_insn @@ -914,6 +913,7 @@ visit_leaq = FunctionGcRootTracker._visit_lea visit_xorq = FunctionGcRootTracker.binary_insn + visit_xchgq = FunctionGcRootTracker._visit_xchg # FIXME: similar to visit_popl for 32-bit def visit_popq(self, line): From arigo at codespeak.net Fri Sep 24 19:32:57 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 19:32:57 +0200 (CEST) Subject: [pypy-svn] r77355 - in pypy/branch/smaller-writebarrier/pypy/rpython/memory: gc gc/test gctransform test Message-ID: <20100924173257.95B3E282B9E@codespeak.net> Author: arigo Date: Fri Sep 24 19:32:55 2010 New Revision: 77355 Added: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/inspector.py - copied unchanged from r77208, pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/inspect.py Removed: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/inspect.py Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_gc.py Log: Change the logic on where to allocate "medium-sized" varsized objects. Copy it from the hybrid GC, so that these objects can still go to the nursery; then they need to be copied to raw-malloced memory. Also rename inspect.py into inspector.py because of conflicts with the stdlib. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Fri Sep 24 19:32:55 2010 @@ -108,10 +108,10 @@ "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger are just allocated with raw_malloc(). The value - # chosen here is enough for a unicode string of length 56 (on 64-bits) - # or 60 (on 32-bits). See rlib.rstring.INIT_SIZE. 
- "small_request_threshold": 256-WORD, + # that are larger are just allocated with raw_malloc(). Note that + # the size limit for being first allocated in the nursery is much + # larger; see below. + "small_request_threshold": 35*WORD, # Full collection threshold: after a major collection, we record # the total size consumed; and after every minor collection, if the @@ -125,7 +125,16 @@ # in regular arrays of pointers; more in arrays whose items are # larger. A value of 0 disables card marking. "card_page_indices": 128, - "card_page_indices_min": 800, # minimum number of indices for cards + + # Objects whose total size is at least 'large_object' bytes are + # allocated out of the nursery immediately. If the object + # has GC pointers in its varsized part, we use instead the + # higher limit 'large_object_gcptrs'. The idea is that + # separately allocated objects are allocated immediately "old" + # and it's not good to have too many pointers from old to young + # objects. + "large_object": 1600*WORD, + "large_object_gcptrs": 8250*WORD, } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, @@ -136,7 +145,8 @@ small_request_threshold=5*WORD, major_collection_threshold=2.5, card_page_indices=0, - card_page_indices_min=None, + large_object=8*WORD, + large_object_gcptrs=10*WORD, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) assert small_request_threshold % WORD == 0 @@ -150,11 +160,17 @@ # self.card_page_indices = card_page_indices if self.card_page_indices > 0: - self.card_page_indices_min = card_page_indices_min self.card_page_shift = 0 while (1 << self.card_page_shift) < self.card_page_indices: self.card_page_shift += 1 # + # 'large_object' and 'large_object_gcptrs' limit how big objects + # can be in the nursery, so they give a lower bound on the allowed + # size of the nursery. + self.nonlarge_max = large_object - 1 + self.nonlarge_gcptrs_max = large_object_gcptrs - 1 + assert self.nonlarge_max <= self.nonlarge_gcptrs_max + # self.nursery = NULL self.nursery_free = NULL self.nursery_top = NULL @@ -218,7 +234,7 @@ else: # defaultsize = self.nursery_size - minsize = 18 * self.small_request_threshold + minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # @@ -247,10 +263,11 @@ def allocate_nursery(self): debug_start("gc-set-nursery-size") debug_print("nursery size:", self.nursery_size) - # the start of the nursery: we actually allocate a tiny bit more for + # the start of the nursery: we actually allocate a bit more for # the nursery than really needed, to simplify pointer arithmetic - # in malloc_fixedsize_clear(). - extra = self.small_request_threshold + # in malloc_fixedsize_clear(). The few extra pages are never used + # anyway so it doesn't even count. + extra = self.nonlarge_gcptrs_max + 1 self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) if not self.nursery: raise MemoryError("cannot allocate nursery") @@ -278,9 +295,10 @@ "'needs_finalizer' and 'contains_weakptr' both specified") result = self.malloc_with_finalizer(typeid, totalsize) # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. The following check should be constant-folded. - elif rawtotalsize > self.small_request_threshold: + # If totalsize is greater than nonlarge_max (which should never be + # the case in practice), ask for a rawmalloc. The following check + # should be constant-folded. 
+ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") result = self._external_malloc(typeid, totalsize) @@ -315,26 +333,38 @@ ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. - if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - result = self._external_malloc_cardmark(typeid, totalsize, length) + # Compute the maximal length that makes the object still + # below 'nonlarge_max'. All the following logic is usually + # constant-folded because self.nonlarge_max, size and itemsize + # are all constants (the arguments are constant due to + # inlining) and self.has_gcptr_in_varsize() is constant-folded. + if self.has_gcptr_in_varsize(typeid): + nonlarge_max = self.nonlarge_gcptrs_max + else: + nonlarge_max = self.nonlarge_max + + if not llmemory.raw_malloc_usage(itemsize): + too_many_items = (llmemory.raw_malloc_usage(nonvarsize) > + nonlarge_max) + else: + maxlength = nonlarge_max - llmemory.raw_malloc_usage(nonvarsize) + maxlength = maxlength // llmemory.raw_malloc_usage(itemsize) + too_many_items = length > maxlength + + if too_many_items: + # + # If the total size of the object would be larger than + # 'nonlarge_max', then allocate it externally and give it + # card marks. + result = self._external_malloc_cardmark(typeid, nonvarsize, + itemsize, length) # else: - # Round the size up to the next multiple of WORD. Note that - # this is done only if totalsize <= self.small_request_threshold, - # i.e. it cannot overflow, and it keeps the property that - # totalsize <= self.small_request_threshold. + # With the above checks we know now that totalsize cannot be more + # than 'nonlarge_max'; in particular, the + and * cannot overflow. + totalsize = nonvarsize + itemsize * length totalsize = llarena.round_up_for_allocation(totalsize) - ll_assert(llmemory.raw_malloc_usage(totalsize) <= - self.small_request_threshold, - "round_up_for_allocation() rounded up too much?") # # 'totalsize' should contain at least the GC header and # the length word, so it should never be smaller than @@ -398,13 +428,24 @@ def _external_malloc(self, typeid, totalsize): """Allocate a large object using raw_malloc().""" - return self._external_malloc_cardmark(typeid, totalsize, 0) + return self._external_malloc_cardmark(typeid, totalsize, 0, 0) - def _external_malloc_cardmark(self, typeid, totalsize, length): + def _external_malloc_cardmark(self, typeid, nonvarsize, itemsize, length): """Allocate a large object using raw_malloc(), possibly as an - object with card marking enabled, if its length is large enough. - 'length' can be specified as 0 if the object is not varsized.""" + object with card marking enabled, if it has gc pointers in its + var-sized part. 'length' can be specified as 0 if the object + is not varsized.""" + # + # Compute the total size, carefully checking for overflows. + if length == 0: + totalsize = nonvarsize + else: + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError # # If somebody calls this function a lot, we must eventually # force a full collection. @@ -412,7 +453,6 @@ # # Check if we need to introduce the card marker bits area. 
if (self.card_page_indices <= 0 # <- this check is constant-folded - or length < self.card_page_indices_min # <- must be large enough or not self.has_gcptr_in_varsize(typeid)): # <- must contain ptrs # # In these cases, we don't want a card marker bits area. @@ -979,7 +1019,7 @@ if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: # # Common case: allocate a new nonmovable location for it. - newhdr = self.ac.malloc(totalsize) + newhdr = self._alloc_out_of_nursery(totalsize) # else: # The object has already a shadow. @@ -1017,6 +1057,26 @@ self.old_objects_pointing_to_young.append(newobj) + def _alloc_out_of_nursery(self, totalsize): + """Allocate non-movable memory for an object of the given + 'totalsize' that lives so far in the nursery.""" + if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: + # for nursery objects that are not small + arena = llarena.arena_malloc(llmemory.raw_malloc_usage(totalsize), + False) + if not arena: + raise MemoryError("cannot allocate object") + llarena.arena_reserve(arena, totalsize) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(arena + size_gc_header) + return arena + else: + # most common path + return self.ac.malloc(totalsize) + + # ---------- # Full collection @@ -1242,7 +1302,8 @@ else: size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) - shadowhdr = self.ac.malloc(size_gc_header + size) + shadowhdr = self._alloc_out_of_nursery(size_gc_header + + size) # initialize to an invalid tid *without* GCFLAG_VISITED, # so that if the object dies before the next minor # collection, the shadow will stay around but be collected Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_direct.py Fri Sep 24 19:32:55 2010 @@ -505,8 +505,7 @@ for index, expected_x in nums.items(): assert a[index].x == expected_x self.stackroots.pop() - test_card_marker.GC_PARAMS = {"card_page_indices": 4, - "card_page_indices_min": 7} + test_card_marker.GC_PARAMS = {"card_page_indices": 4} class TestMiniMarkGCFull(DirectGCTest): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gctransform/framework.py Fri Sep 24 19:32:55 2010 @@ -139,7 +139,7 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.memory.gc import inspect + from pypy.rpython.memory.gc import inspector super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): @@ -391,27 +391,27 @@ else: self.id_ptr = None - self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + self.get_rpy_roots_ptr = getfn(inspector.get_rpy_roots, [s_gc], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + self.get_rpy_referents_ptr = getfn(inspector.get_rpy_referents, [s_gc, 
s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + self.get_rpy_memory_usage_ptr = getfn(inspector.get_rpy_memory_usage, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + self.get_rpy_type_index_ptr = getfn(inspector.get_rpy_type_index, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + self.is_rpy_instance_ptr = getfn(inspector.is_rpy_instance, [s_gc, s_gcref], annmodel.SomeBool(), minimal_transform=False) - self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + self.dump_rpy_heap_ptr = getfn(inspector.dump_rpy_heap, [s_gc, annmodel.SomeInteger()], annmodel.s_Bool, minimal_transform=False) Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_gc.py Fri Sep 24 19:32:55 2010 @@ -29,6 +29,7 @@ GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False + BUT_HOW_BIG_IS_A_BIG_STRING = 12 def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -495,7 +496,8 @@ # with larger numbers, it gets allocated outside the semispace # with some GCs. flag = self.GC_CAN_SHRINK_BIG_ARRAY - assert self.interpret(f, [12, 0, flag]) == 0x62024241 + bigsize = self.BUT_HOW_BIG_IS_A_BIG_STRING + assert self.interpret(f, [bigsize, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -770,7 +772,7 @@ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True + BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD class TestMiniMarkGCCardMarking(TestMiniMarkGC): - GC_PARAMS = {'card_page_indices': 4, - 'card_page_indices_min': 10} + GC_PARAMS = {'card_page_indices': 4} From arigo at codespeak.net Fri Sep 24 19:35:32 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Fri, 24 Sep 2010 19:35:32 +0200 (CEST) Subject: [pypy-svn] r77356 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/test Message-ID: <20100924173532.09C92282B9E@codespeak.net> Author: arigo Date: Fri Sep 24 19:35:31 2010 New Revision: 77356 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_transformed_gc.py Log: Remove deprecated argument. 
Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/test/test_transformed_gc.py Fri Sep 24 19:35:31 2010 @@ -1475,7 +1475,6 @@ 'arena_size': 64*WORD, 'small_request_threshold': 5*WORD, 'card_page_indices': 4, - 'card_page_indices_min': 10, } root_stack_depth = 200 From afa at codespeak.net Fri Sep 24 19:54:49 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 19:54:49 +0200 (CEST) Subject: [pypy-svn] r77357 - in pypy/branch/fast-forward/pypy: module/_ssl module/_ssl/test rpython/tool Message-ID: <20100924175449.467DE282B9E@codespeak.net> Author: afa Date: Fri Sep 24 19:54:47 2010 New Revision: 77357 Modified: pypy/branch/fast-forward/pypy/module/_ssl/__init__.py pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py Log: Add various constants: _ssl.OPENSSL_VERSION* Modified: pypy/branch/fast-forward/pypy/module/_ssl/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/__init__.py Fri Sep 24 19:54:47 2010 @@ -16,7 +16,7 @@ from pypy.module._ssl.interp_ssl import constants, HAVE_OPENSSL_RAND for constant, value in constants.iteritems(): - Module.interpleveldefs[constant] = "space.wrap(%r)" % value + Module.interpleveldefs[constant] = "space.wrap(%r)" % (value,) if HAVE_OPENSSL_RAND: Module.interpleveldefs['RAND_add'] = "interp_ssl.RAND_add" Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Fri Sep 24 19:54:47 2010 @@ -69,6 +69,8 @@ OPENSSL_VERSION_NUMBER = rffi_platform.ConstantInteger( "OPENSSL_VERSION_NUMBER") + SSLEAY_VERSION = rffi_platform.DefinedConstantString( + "SSLEAY_VERSION", "SSLeay_version(SSLEAY_VERSION)") SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM") SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") @@ -111,6 +113,16 @@ constants["SSL_ERROR_EOF"] = PY_SSL_ERROR_EOF constants["SSL_ERROR_INVALID_ERROR_CODE"] = PY_SSL_ERROR_INVALID_ERROR_CODE +constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER +ver = OPENSSL_VERSION_NUMBER +ver, status = divmod(ver, 16) +ver, patch = divmod(ver, 256) +ver, fix = divmod(ver, 256) +ver, minor = divmod(ver, 256) +ver, major = divmod(ver, 256) +constants["OPENSSL_VERSION_INFO"] = (major, minor, fix, patch, status) +constants["OPENSSL_VERSION"] = SSLEAY_VERSION + def ssl_external(name, argtypes, restype, **kw): kw['compilation_info'] = eci globals()['libssl_' + name] = rffi.llexternal( Modified: pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/test/test_ssl.py Fri Sep 24 19:54:47 2010 @@ -26,6 +26,12 @@ assert isinstance(_ssl.SSL_ERROR_WANT_CONNECT, int) 
assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) + + assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, int) + assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) + assert len(_ssl.OPENSSL_VERSION_INFO) == 5 + assert isinstance(_ssl.OPENSSL_VERSION, str) + assert 'openssl' in _ssl.OPENSSL_VERSION.lower() def test_RAND_add(self): import _ssl Modified: pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py (original) +++ pypy/branch/fast-forward/pypy/rpython/tool/rffi_platform.py Fri Sep 24 19:54:47 2010 @@ -438,14 +438,14 @@ class DefinedConstantString(CConfigEntry): """ """ - def __init__(self, macro): + def __init__(self, macro, name=None): self.macro = macro - self.name = macro + self.name = name or macro def prepare_code(self): yield '#ifdef %s' % self.macro yield 'int i;' - yield 'char *p = %s;' % self.macro + yield 'char *p = %s;' % self.name yield 'dump("defined", 1);' yield 'for (i = 0; p[i] != 0; i++ ) {' yield ' printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);' From afa at codespeak.net Fri Sep 24 22:23:38 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Fri, 24 Sep 2010 22:23:38 +0200 (CEST) Subject: [pypy-svn] r77358 - pypy/branch/fast-forward/lib-python/2.7.0 Message-ID: <20100924202338.8559B282B9C@codespeak.net> Author: afa Date: Fri Sep 24 22:23:35 2010 New Revision: 77358 Modified: pypy/branch/fast-forward/lib-python/2.7.0/urllib.py Log: Change to a much simpler code that does not require 'bytearray' (which is not yet available in pypy.) Yes, I'm modifying the original copy of urllib.py, for two reasons: - It's a cosmetic change that will not be backported to CPython - It will be discarded when we import a new revision of the 2.7 stdlib, but at this time we will have a working bytearray and we will forget all this. 
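A minimal sketch (Python 2, not part of the patch) of why the rewrite is purely cosmetic: the two ways of enumerating the keys of _safe_map yield the same 256 one-byte strings, so the change only removes the dependency on bytearray.

    # Illustrative only; assumes CPython 2.7 where bytearray is available.
    with_bytearray = list(str(bytearray(xrange(256))))   # original form
    with_chr       = [chr(i) for i in xrange(256)]       # simpler form used here
    assert with_bytearray == with_chr
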
Modified: pypy/branch/fast-forward/lib-python/2.7.0/urllib.py ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/urllib.py (original) +++ pypy/branch/fast-forward/lib-python/2.7.0/urllib.py Fri Sep 24 22:23:35 2010 @@ -1189,7 +1189,8 @@ 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') _safe_map = {} -for i, c in zip(xrange(256), str(bytearray(xrange(256)))): +for i in xrange(256): + c = chr(i) _safe_map[c] = c if (i < 128 and c in always_safe) else '%{:02X}'.format(i) _safe_quoters = {} From afa at codespeak.net Sat Sep 25 00:01:40 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 25 Sep 2010 00:01:40 +0200 (CEST) Subject: [pypy-svn] r77359 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100924220140.08CD4282B9C@codespeak.net> Author: afa Date: Sat Sep 25 00:01:39 2010 New Revision: 77359 Modified: pypy/branch/fast-forward/pypy/objspace/std/floattype.py Log: one again: float('nan') is not RPython Modified: pypy/branch/fast-forward/pypy/objspace/std/floattype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/floattype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/floattype.py Sat Sep 25 00:01:39 2010 @@ -106,7 +106,7 @@ i += 1 if length - i >= 2 and s[i:i + 2].lower() == "an": i += 2 - value = float("nan") + value = rarithmetic.NAN else: if (s[i] == "0" and length - i > 1 and (s[i + 1] == "x" or s[i + 1] == "X")): From afa at codespeak.net Sat Sep 25 00:31:57 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 25 Sep 2010 00:31:57 +0200 (CEST) Subject: [pypy-svn] r77360 - pypy/branch/fast-forward/pypy/module/_ssl Message-ID: <20100924223157.348BA282B9C@codespeak.net> Author: afa Date: Sat Sep 25 00:31:55 2010 New Revision: 77360 Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Log: Add missing constants in the _ssl module. 
This should fix "import ssl" Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Sat Sep 25 00:31:55 2010 @@ -51,6 +51,11 @@ PY_SSL_ERROR_EOF = 8 # special case of SSL_ERROR_SYSCALL PY_SSL_ERROR_INVALID_ERROR_CODE = 9 +PY_SSL_CERT_NONE, PY_SSL_CERT_OPTIONAL, PY_SSL_CERT_REQUIRED = 0, 1, 2 + +(PY_SSL_VERSION_SSL2, PY_SSL_VERSION_SSL3, + PY_SSL_VERSION_SSL23, PY_SSL_VERSION_TLS1) = range(4) + SOCKET_IS_NONBLOCKING, SOCKET_IS_BLOCKING = 0, 1 SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 @@ -113,6 +118,15 @@ constants["SSL_ERROR_EOF"] = PY_SSL_ERROR_EOF constants["SSL_ERROR_INVALID_ERROR_CODE"] = PY_SSL_ERROR_INVALID_ERROR_CODE +constants["SSL_CERT_NONE"] = PY_SSL_CERT_NONE +constants["SSL_CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL +constants["SSL_CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED + +constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 +constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 +constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 +constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 + constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER ver = OPENSSL_VERSION_NUMBER ver, status = divmod(ver, 16) From benjamin at codespeak.net Sat Sep 25 02:44:58 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 25 Sep 2010 02:44:58 +0200 (CEST) Subject: [pypy-svn] r77361 - pypy/branch/fast-forward/lib-python Message-ID: <20100925004458.A14AC282B9C@codespeak.net> Author: benjamin Date: Sat Sep 25 02:44:56 2010 New Revision: 77361 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: remove unbound variable Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Sat Sep 25 02:44:56 2010 @@ -643,10 +643,10 @@ pypy_options.extend( ['--withmod-%s' % mod for mod in regrtest.usemodules]) sopt = " ".join(pypy_options) - cmd = "%s %s %d %s -S %s %s %s %s" %( + cmd = "%s %s %d %s -S %s %s -v %s" %( python, alarm_script, TIMEOUT, pypy_script, sopt, - regrrun, regrrun_verbosity, fspath.purebasename) + regrrun, fspath.purebasename) return cmd def runtest(self): From benjamin at codespeak.net Sat Sep 25 02:51:04 2010 From: benjamin at codespeak.net (benjamin at codespeak.net) Date: Sat, 25 Sep 2010 02:51:04 +0200 (CEST) Subject: [pypy-svn] r77362 - pypy/branch/fast-forward/lib-python Message-ID: <20100925005104.0EB63282B9C@codespeak.net> Author: benjamin Date: Sat Sep 25 02:51:03 2010 New Revision: 77362 Modified: pypy/branch/fast-forward/lib-python/conftest.py Log: fix arg order Modified: pypy/branch/fast-forward/lib-python/conftest.py ============================================================================== --- pypy/branch/fast-forward/lib-python/conftest.py (original) +++ pypy/branch/fast-forward/lib-python/conftest.py Sat Sep 25 02:51:03 2010 @@ -643,7 +643,7 @@ pypy_options.extend( ['--withmod-%s' % mod for mod in regrtest.usemodules]) sopt = " ".join(pypy_options) - cmd = "%s %s %d %s -S %s %s -v %s" %( + cmd = "%s %s %d %s -S %s %s %s -v" %( python, alarm_script, TIMEOUT, pypy_script, sopt, regrrun, fspath.purebasename) From agaynor at codespeak.net Sat Sep 25 04:45:48 2010 From: agaynor at 
codespeak.net (agaynor at codespeak.net) Date: Sat, 25 Sep 2010 04:45:48 +0200 (CEST) Subject: [pypy-svn] r77363 - pypy/branch/fast-forward/pypy/module/__builtin__ Message-ID: <20100925024548.4B2AD282B9C@codespeak.net> Author: agaynor Date: Sat Sep 25 04:45:44 2010 New Revision: 77363 Modified: pypy/branch/fast-forward/pypy/module/__builtin__/functional.py Log: Fix 2 tests in functools that fail because our docstring for max is different from CPython's. Modified: pypy/branch/fast-forward/pypy/module/__builtin__/functional.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/functional.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/functional.py Sat Sep 25 04:45:44 2010 @@ -164,9 +164,11 @@ return w_max_item def max(space, __args__): - """Return the largest item in a sequence. - - If more than one argument is passed, return the maximum of them. + """max(iterable[, key=func]) -> value + max(a, b, c, ...[, key=func]) -> value + + With a single iterable argument, return its largest item. + With two or more arguments, return the largest argument. """ return min_max(space, __args__, "max") max.unwrap_spec = [ObjSpace, Arguments] From afa at codespeak.net Sat Sep 25 09:42:21 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 25 Sep 2010 09:42:21 +0200 (CEST) Subject: [pypy-svn] r77364 - pypy/branch/fast-forward/pypy/module/_ssl Message-ID: <20100925074221.2B3AA282BAD@codespeak.net> Author: afa Date: Sat Sep 25 09:42:19 2010 New Revision: 77364 Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Log: Used wrong constant names... Modified: pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py (original) +++ pypy/branch/fast-forward/pypy/module/_ssl/interp_ssl.py Sat Sep 25 09:42:19 2010 @@ -118,9 +118,9 @@ constants["SSL_ERROR_EOF"] = PY_SSL_ERROR_EOF constants["SSL_ERROR_INVALID_ERROR_CODE"] = PY_SSL_ERROR_INVALID_ERROR_CODE -constants["SSL_CERT_NONE"] = PY_SSL_CERT_NONE -constants["SSL_CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL -constants["SSL_CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED +constants["CERT_NONE"] = PY_SSL_CERT_NONE +constants["CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL +constants["CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 From hakanardo at codespeak.net Sat Sep 25 11:07:43 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Sat, 25 Sep 2010 11:07:43 +0200 (CEST) Subject: [pypy-svn] r77365 - in pypy/branch/jit-loop-invaraints/pypy/jit: metainterp metainterp/optimizeopt metainterp/test tl Message-ID: <20100925090743.40790282BAD@codespeak.net> Author: hakanardo Date: Sat Sep 25 11:07:41 2010 New Revision: 77365 Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py Log: Preambles inlined into bridges Modified: 
pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py Sat Sep 25 11:07:41 2010 @@ -15,6 +15,7 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.codewriter import heaptracker +from pypy.rlib.debug import debug_print def giveup(): from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -82,6 +83,7 @@ send_loop_to_backend(metainterp_sd, loop, "loop") send_loop_to_backend(metainterp_sd, loop.preamble, "loop") insert_loop_token(old_loop_tokens, loop.preamble.token) + loop.preamble.token.inlinable = loop.preamble return loop.preamble.token else: send_loop_to_backend(metainterp_sd, loop, "loop") @@ -499,6 +501,9 @@ new_loop.greenkey = self.original_greenkey new_loop.inputargs = self.redkey new_loop.token = new_loop_token + print + print "HEEEEEEEEEEEEEEEEEEEERRRRRRRRRRRRRRRRRRRRRRRR" + print new_loop_token send_loop_to_backend(metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( @@ -530,10 +535,13 @@ new_loop.operations = [op.clone() for op in metainterp.history.operations] metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate + debug_print("\ncnb: ") try: target_loop_token = state.optimize_bridge(metainterp_sd, old_loop_tokens, new_loop) + debug_print("cnb: ", new_loop.operations[-1], target_loop_token) + except InvalidLoop: # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop @@ -550,7 +558,9 @@ op = new_loop.operations[-1] if not isinstance(target_loop_token, TerminatingLoopToken): # normal case - op.descr = target_loop_token # patch the jump target + # FIXME: When is this patching needed? + #op.descr = target_loop_token # patch the jump target + pass else: # The target_loop_token is a pseudo loop token, # e.g. loop_tokens_done_with_this_frame_void[0] Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py Sat Sep 25 11:07:41 2010 @@ -703,6 +703,7 @@ # specnodes = ... # and more data specified by the backend when the loop is compiled number = 0 + inlinable = None def __init__(self, number=0): self.number = number @@ -710,6 +711,9 @@ def repr_of_descr(self): return '' % self.number + def __repr__(self): + return '' % self.number + class TreeLoop(object): inputargs = None operations = None Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Sat Sep 25 11:07:41 2010 @@ -1,6 +1,7 @@ from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.compile import prebuiltNotSpecNode +from pypy.rlib.debug import debug_print class OptInvariant(Optimization): """Move loop invariant code into a preamble. 
@@ -64,14 +65,11 @@ preamble.token.preamble = preamble return - elif op.descr.preamble: + elif op.descr.inlinable: # Bridge calling a loop with preamble, inline it # - print - print "hi: ", op - print loop - print - self.inline(op.descr.preamble, op.args) + debug_print("Inlining: ", op, "into", loop) + self.inline(op.descr.inlinable, op.args) return elif (op.is_always_pure()):# or op.is_foldable_guard() or op.is_ovf()): @@ -113,6 +111,7 @@ if op.result: newop.result = op.result.clonebox() argmap[op.result] = newop.result + debug_print(" ", newop) self.emit_operation(newop) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 25 11:07:41 2010 @@ -12,6 +12,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.rlib.debug import debug_print LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/virtualize.py Sat Sep 25 11:07:41 2010 @@ -7,7 +7,7 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * - +from pypy.rlib.debug import debug_print class AbstractVirtualValue(OptValue): _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo') @@ -283,10 +283,10 @@ return vvalue def optimize_JUMP(self, op): + # FIXME: When do we need orgop instead of op? 
orgop = self.optimizer.loop.operations[-1] - #orgop.descr = op.descr orgop = op - print 'Nbr: ', op.descr.number + debug_print("optimize_JUMP(",op,")") exitargs = [] target_loop_token = orgop.descr assert isinstance(target_loop_token, LoopToken) @@ -296,6 +296,7 @@ value = self.getvalue(op.args[i]) specnodes[i].teardown_virtual_node(self, value, exitargs) op.args = exitargs[:] + debug_print(" ", op) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Sat Sep 25 11:07:41 2010 @@ -334,11 +334,11 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(2) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'int_mul': 1, - 'jump': 2}) + self.check_loop_count(1) + self.check_loops({'guard_true': 1, 'guard_no_overflow': 1, + 'int_add': 2, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, 'int_mul_ovf': 1, + 'jump': 1}) def test_loop_invariant_mul_guard(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -353,7 +353,7 @@ y -= 1 return res res = self.meta_interp(f, [6, 16]) - assert res == 265 + assert res == 919 self.check_loop_count(3) def test_loop_invariant_intbox(self): Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py Sat Sep 25 11:07:41 2010 @@ -269,6 +269,7 @@ # set counter to -2, to mean "tracing in effect" cell.counter = -2 try: + print "compile and run" loop_token = metainterp.compile_and_run_once(jitdriver_sd, *args) finally: @@ -287,6 +288,8 @@ set_future_values(*args[num_green_args:]) loop_token = cell.entry_loop_token + print "# ---------- execute assembler ----------" + print "Loop", loop_token.number # ---------- execute assembler ---------- while True: # until interrupted by an exception metainterp_sd.profiler.start_running() Modified: pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/tl/pypyjit_demo.py Sat Sep 25 11:07:41 2010 @@ -82,16 +82,16 @@ try: from array import array - def f(): - i = 0 - a = 1 - sa = array('d', (0,0)) - while i < 500000000: - sa[0] += a*a - i += 1 - return sa + i = 0 + a = 1 + sa = array('d', (0,0)) + while i < 5: + sa[0] += a*a + i += 1 + return sa +# from tvid import f print f() except Exception, e: From afa at codespeak.net Sat Sep 25 13:47:52 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sat, 25 Sep 2010 13:47:52 +0200 (CEST) Subject: [pypy-svn] r77366 - pypy/branch/fast-forward/pypy/module/select/test Message-ID: <20100925114752.136B4282BAD@codespeak.net> Author: afa Date: Sat Sep 25 13:47:51 2010 New Revision: 77366 Modified: pypy/branch/fast-forward/pypy/module/select/test/test_select.py Log: A 2.7 socket "relies on reference counting to close the underlying socket object". I don't like this, but the call to gc.collect() is needed for the test to pass with the -A option on pypy-c. 
I suspect there will be a lot more of theses issues. Modified: pypy/branch/fast-forward/pypy/module/select/test/test_select.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/select/test/test_select.py (original) +++ pypy/branch/fast-forward/pypy/module/select/test/test_select.py Sat Sep 25 13:47:51 2010 @@ -112,6 +112,7 @@ # more data is available if sys.platform != 'win32': writeend.close() + import gc; gc.collect() assert 1 <= total_out <= 512 total_in = 0 while True: @@ -140,6 +141,7 @@ readend, writeend = self.getpair() try: readend.close() + import gc; gc.collect() iwtd, owtd, ewtd = select.select([writeend], [], [], 0) assert iwtd == [writeend] assert owtd == ewtd == [] From hakanardo at codespeak.net Sun Sep 26 10:51:37 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Sun, 26 Sep 2010 10:51:37 +0200 (CEST) Subject: [pypy-svn] r77374 - in pypy/branch/jit-loop-invaraints/pypy/jit/metainterp: . optimizeopt test Message-ID: <20100926085137.0F1A5282BAD@codespeak.net> Author: hakanardo Date: Sun Sep 26 10:51:35 2010 New Revision: 77374 Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimize.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/simple_optimize.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py Log: Passing all metainterp tests except optimizeopt Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/compile.py Sun Sep 26 10:51:35 2010 @@ -501,9 +501,7 @@ new_loop.greenkey = self.original_greenkey new_loop.inputargs = self.redkey new_loop.token = new_loop_token - print - print "HEEEEEEEEEEEEEEEEEEEERRRRRRRRRRRRRRRRRRRRRRRR" - print new_loop_token + send_loop_to_backend(metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( @@ -535,13 +533,10 @@ new_loop.operations = [op.clone() for op in metainterp.history.operations] metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate - debug_print("\ncnb: ") try: target_loop_token = state.optimize_bridge(metainterp_sd, old_loop_tokens, new_loop) - debug_print("cnb: ", new_loop.operations[-1], target_loop_token) - except InvalidLoop: # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/history.py Sun Sep 26 10:51:35 2010 @@ -120,7 +120,8 @@ raise NotImplementedError class AbstractDescr(AbstractValue): - __slots__ = () + __slots__ = ('inlinable',) + inlinable = None def repr_of_descr(self): return '%r' % (self,) @@ -703,7 +704,6 @@ # specnodes = ... 
# and more data specified by the backend when the loop is compiled number = 0 - inlinable = None def __init__(self, number=0): self.number = number Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimize.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimize.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimize.py Sun Sep 26 10:51:35 2010 @@ -1,4 +1,4 @@ -from pypy.rlib.debug import debug_start, debug_stop +from pypy.rlib.debug import debug_start, debug_stop, debug_print # ____________________________________________________________ Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Sun Sep 26 10:51:35 2010 @@ -2,6 +2,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.compile import prebuiltNotSpecNode from pypy.rlib.debug import debug_print +from pypy.jit.metainterp.history import Const class OptInvariant(Optimization): """Move loop invariant code into a preamble. @@ -25,16 +26,7 @@ v = self.getvalue(arg_in) v.invariant = True - def invariant_boxes(self): - invariant_boxes = [] - for op in self.optimizer.preamble: - if self.get_constant_box(op.result) is None: - v = self.getvalue(op.result) - v.invariant = True - box = v.force_box() - if box and box not in invariant_boxes: - invariant_boxes.append(box) - return invariant_boxes + self.invariant_boxes = [] def propagate_forward(self, op): @@ -46,7 +38,7 @@ preamble = loop.preamble preamble.inputargs = loop.inputargs[:] - invariant_boxes = self.invariant_boxes() + invariant_boxes = self.invariant_boxes loop.inputargs.extend(invariant_boxes) op.args = op.args + invariant_boxes preamble.operations = self.optimizer.preamble @@ -79,6 +71,10 @@ if self.get_constant_box(op.result) is None: v = self.getvalue(op.result) v.invariant = True + box = v.force_box() + if box and box not in self.invariant_boxes: + self.invariant_boxes.append(box) + return @@ -107,7 +103,10 @@ argmap[loop.inputargs[i]] = inputargs[i] for op in loop.operations: newop = op.clone() - newop.args = [argmap[a] for a in op.args] + for i in range(len(op.args)): + a = op.args[i] + if not isinstance(a, Const): + newop.args[i] = argmap[a] if op.result: newop.result = op.result.clonebox() argmap[op.result] = newop.result Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/simple_optimize.py Sun Sep 26 10:51:35 2010 @@ -4,6 +4,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp import resume, compile +from pypy.rlib.debug import debug_print EMPTY_VALUES = {} @@ -47,4 +48,5 @@ def optimize_bridge(metainterp_sd, old_loops, loop): optimize_loop(metainterp_sd, [], loop) + loop.operations[-1].descr = old_loops[0] return old_loops[0] Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- 
pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Sun Sep 26 10:51:35 2010 @@ -340,7 +340,7 @@ 'int_mul': 1, 'int_mul_ovf': 1, 'jump': 1}) - def test_loop_invariant_mul_guard(self): + def test_loop_invariant_mul_bridge1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -356,6 +356,26 @@ assert res == 919 self.check_loop_count(3) + def test_loop_invariant_mul_bridge_maintaining(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + if y<8: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 583 + self.check_loop_count(3) + self.check_loops({'int_lt': 1, 'int_gt': 1, + 'guard_false': 1, 'guard_true': 1, + 'int_sub': 2, 'int_mul': 2, 'int_add': 2, + 'jump': 3}) + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/warmstate.py Sun Sep 26 10:51:35 2010 @@ -13,6 +13,7 @@ from pypy.rlib.debug import debug_start, debug_stop, debug_print from pypy.jit.metainterp import history from pypy.jit.codewriter import support, heaptracker +from pypy.rlib.debug import debug_print # ____________________________________________________________ From arigo at codespeak.net Sun Sep 26 13:50:07 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 13:50:07 +0200 (CEST) Subject: [pypy-svn] r77375 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100926115007.5758E282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 13:50:05 2010 New Revision: 77375 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Log: Clean-up: replace a lot of functions with similar functionality with a single big function with a clear interface. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Sun Sep 26 13:50:05 2010 @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage from pypy.rpython.memory.gc.base import GCBase, MovingGCBase from pypy.rpython.memory.gc import minimarkpage, base, generation from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -286,14 +287,15 @@ ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size - rawtotalsize = llmemory.raw_malloc_usage(totalsize) + rawtotalsize = raw_malloc_usage(totalsize) # # If the object needs a finalizer, ask for a rawmalloc. # The following check should be constant-folded. 
if needs_finalizer: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - result = self.malloc_with_finalizer(typeid, totalsize) + obj = self.external_malloc(typeid, 0) + self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a rawmalloc. The following check @@ -301,12 +303,12 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - result = self._external_malloc(typeid, totalsize) + obj = self.external_malloc(typeid, 0) # else: # If totalsize is smaller than minimal_size_in_nursery, round it # up. The following check should also be constant-folded. - min_size = llmemory.raw_malloc_usage(self.minimal_size_in_nursery) + min_size = raw_malloc_usage(self.minimal_size_in_nursery) if rawtotalsize < min_size: totalsize = rawtotalsize = min_size # @@ -324,8 +326,10 @@ # If it is a weakref, record it (check constant-folded). if contains_weakptr: self.young_objects_with_weakrefs.append(result+size_gc_header) + # + obj = result + size_gc_header # - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_clear(self, typeid, length, size, itemsize, @@ -344,21 +348,18 @@ else: nonlarge_max = self.nonlarge_max - if not llmemory.raw_malloc_usage(itemsize): - too_many_items = (llmemory.raw_malloc_usage(nonvarsize) > - nonlarge_max) + if not raw_malloc_usage(itemsize): + too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max else: - maxlength = nonlarge_max - llmemory.raw_malloc_usage(nonvarsize) - maxlength = maxlength // llmemory.raw_malloc_usage(itemsize) + maxlength = nonlarge_max - raw_malloc_usage(nonvarsize) + maxlength = maxlength // raw_malloc_usage(itemsize) too_many_items = length > maxlength if too_many_items: # # If the total size of the object would be larger than - # 'nonlarge_max', then allocate it externally and give it - # card marks. - result = self._external_malloc_cardmark(typeid, nonvarsize, - itemsize, length) + # 'nonlarge_max', then allocate it externally. + obj = self.external_malloc(typeid, length) # else: # With the above checks we know now that totalsize cannot be more @@ -369,8 +370,8 @@ # 'totalsize' should contain at least the GC header and # the length word, so it should never be smaller than # 'minimal_size_in_nursery' - ll_assert(llmemory.raw_malloc_usage(totalsize) >= - llmemory.raw_malloc_usage(self.minimal_size_in_nursery), + ll_assert(raw_malloc_usage(totalsize) >= + raw_malloc_usage(self.minimal_size_in_nursery), "malloc_varsize_clear(): totalsize < minimalsize") # # Get the memory from the nursery. If there is not enough space @@ -383,10 +384,12 @@ # Build the object. llarena.arena_reserve(result, totalsize) self.init_gc_object(result, typeid, flags=0) + # + # Set the length and return the object. + obj = result + size_gc_header + (obj + offset_to_length).signed[0] = length # - # Set the length and return the object. 
- (result + size_gc_header + offset_to_length).signed[0] = length - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def collect(self, gen=1): @@ -419,28 +422,22 @@ collect_and_reserve._dont_inline_ = True - def _full_collect_if_needed(self, reserving_size): - reserving_size = llmemory.raw_malloc_usage(reserving_size) - if (float(self.get_total_memory_used()) + reserving_size > - self.next_major_collection_threshold): - self.minor_collection() - self.major_collection(reserving_size) - - def _external_malloc(self, typeid, totalsize): - """Allocate a large object using raw_malloc().""" - return self._external_malloc_cardmark(typeid, totalsize, 0, 0) - - - def _external_malloc_cardmark(self, typeid, nonvarsize, itemsize, length): - """Allocate a large object using raw_malloc(), possibly as an - object with card marking enabled, if it has gc pointers in its - var-sized part. 'length' can be specified as 0 if the object - is not varsized.""" + def external_malloc(self, typeid, length): + """Allocate a large object using the ArenaCollection or + raw_malloc(), possibly as an object with card marking enabled, + if it has gc pointers in its var-sized part. 'length' should be + specified as 0 if the object is not varsized. The returned + object is fully initialized and zero-filled.""" # # Compute the total size, carefully checking for overflows. + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + self.fixed_size(typeid) if length == 0: + # this includes the case of fixed-size objects, for which we + # should not even ask for the varsize_item_sizes(). totalsize = nonvarsize else: + itemsize = self.varsize_item_sizes(typeid) try: varsize = ovfcheck(itemsize * length) totalsize = ovfcheck(nonvarsize + varsize) @@ -449,85 +446,79 @@ # # If somebody calls this function a lot, we must eventually # force a full collection. - self._full_collect_if_needed(totalsize) + if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) > + self.next_major_collection_threshold): + self.minor_collection() + self.major_collection(raw_malloc_usage(totalsize)) # - # Check if we need to introduce the card marker bits area. - if (self.card_page_indices <= 0 # <- this check is constant-folded - or not self.has_gcptr_in_varsize(typeid)): # <- must contain ptrs + # Check if the object would fit in the ArenaCollection. + if raw_malloc_usage(totalsize) <= self.small_request_threshold: # - # In these cases, we don't want a card marker bits area. - cardheadersize = 0 + # Yes. Round up 'totalsize' (it cannot overflow and it + # must remain <= self.small_request_threshold.) + totalsize = llarena.round_up_for_allocation(totalsize) + ll_assert(raw_malloc_usage(totalsize) <= + self.small_request_threshold, + "rounding up made totalsize > small_request_threshold") + # + # Allocate from the ArenaCollection and clear the memory returned. + result = self.ac.malloc(totalsize) + llmemory.raw_memclear(result, totalsize) extra_flags = 0 # else: - # Reserve N extra words containing card bits before the object. - extra_words = self.card_marking_words_for_length(length) - cardheadersize = WORD * extra_words - extra_flags = GCFLAG_HAS_CARDS - # - allocsize = cardheadersize + llmemory.raw_malloc_usage(totalsize) - # - # Allocate the object using arena_malloc(), which we assume here - # is just the same as raw_malloc(), but allows the extra flexibility - # of saying that we have extra words in the header. 
- arena = llarena.arena_malloc(allocsize, False) - if not arena: - raise MemoryError("cannot allocate large object") - # - # Clear it using method 2 of llarena.arena_reset(), which is the - # same as just a raw_memclear(). - llarena.arena_reset(arena, allocsize, 2) - # - # Reserve the card mark as a list of single bytes - # (the loop is empty in C). - i = 0 - while i < cardheadersize: - llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) - i += 1 - # - # Initialize the object. - result = arena + cardheadersize - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) - # - # Record the newly allocated object and its size. - size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_objects.append(result + size_gc_header) - return result - _external_malloc_cardmark._dont_inline_ = True - - - def _malloc_nonmovable(self, typeid, totalsize): - """Allocate an object non-movable.""" - # - rawtotalsize = llmemory.raw_malloc_usage(totalsize) - if rawtotalsize > self.small_request_threshold: + # No, so proceed to allocate it externally with raw_malloc(). + # Check if we need to introduce the card marker bits area. + if (self.card_page_indices <= 0 # <- this check is constant-folded + or not self.has_gcptr_in_varsize(typeid) or + raw_malloc_usage(totalsize) <= self.nonlarge_gcptrs_max): + # + # In these cases, we don't want a card marker bits area. + # This case also includes all fixed-size objects. + cardheadersize = 0 + extra_flags = 0 + # + else: + # Reserve N extra words containing card bits before the object. + extra_words = self.card_marking_words_for_length(length) + cardheadersize = WORD * extra_words + extra_flags = GCFLAG_HAS_CARDS + # + allocsize = cardheadersize + raw_malloc_usage(totalsize) + # + # Allocate the object using arena_malloc(), which we assume here + # is just the same as raw_malloc(), but allows the extra + # flexibility of saying that we have extra words in the header. + arena = llarena.arena_malloc(allocsize, False) + if not arena: + raise MemoryError("cannot allocate large object") # - # The size asked for is too large for the ArenaCollection. - return self._external_malloc(typeid, totalsize) - # - totalsize = llarena.round_up_for_allocation(totalsize) - # - # If somebody calls _malloc_nonmovable() a lot, we must eventually - # force a full collection. - self._full_collect_if_needed(totalsize) - # - # Ask the ArenaCollection to do the malloc. - result = self.ac.malloc(totalsize) - llmemory.raw_memclear(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) - return result - - - def malloc_with_finalizer(self, typeid, totalsize): - """Allocate an object with a finalizer.""" + # Clear it using method 2 of llarena.arena_reset(), which is the + # same as just a raw_memclear(). This also clears the card mark + # bits, if any. + llarena.arena_reset(arena, allocsize, 2) + # + # Reserve the card mark bits as a list of single bytes + # (the loop is empty in C). + i = 0 + while i < cardheadersize: + llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) + i += 1 + # + # Reserve the actual object. (This is also a no-op in C). + result = arena + cardheadersize + llarena.arena_reserve(result, totalsize) + # + # Record the newly allocated object and its size. 
+ self.rawmalloced_total_size += raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(result + size_gc_header) # - result = self._malloc_nonmovable(typeid, totalsize) - size_gc_header = self.gcheaderbuilder.size_gc_header - self.objects_with_finalizers.append(result + size_gc_header) - return result - malloc_with_finalizer._dont_inline_ = True + # Common code to fill the header and length of the object. + self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) + if self.is_varsize(typeid): + offset_to_length = self.varsize_offset_to_length(typeid) + (result + size_gc_header + offset_to_length).signed[0] = length + return result + size_gc_header # ---------- @@ -569,37 +560,16 @@ def malloc_fixedsize_nonmovable(self, typeid): - """NOT_RPYTHON: not tested translated""" - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + self.fixed_size(typeid) - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header + obj = self.external_malloc(typeid, 0) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_nonmovable(self, typeid, length): - size_gc_header = self.gcheaderbuilder.size_gc_header - nonvarsize = size_gc_header + self.fixed_size(typeid) - itemsize = self.varsize_item_sizes(typeid) - offset_to_length = self.varsize_offset_to_length(typeid) - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header - (obj + offset_to_length).signed[0] = length + obj = self.external_malloc(typeid, length) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_nonmovable(self, typeid, length, zero): # helper for testing, same as GCBase.malloc - if self.is_varsize(typeid): - gcref = self.malloc_varsize_nonmovable(typeid, length) - else: - gcref = self.malloc_fixedsize_nonmovable(typeid) - return llmemory.cast_ptr_to_adr(gcref) + return self.external_malloc(typeid, length or 0) # None -> 0 # ---------- @@ -1019,7 +989,7 @@ if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: # # Common case: allocate a new nonmovable location for it. - newhdr = self._alloc_out_of_nursery(totalsize) + newhdr = self._malloc_out_of_nursery(totalsize) # else: # The object has already a shadow. 
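# Not part of the diff above -- a minimal plain-Python sketch of the decision
# made by the new external_malloc() in this commit.  The parameter names only
# mirror attributes that appear in the diff; the returned strings are
# placeholders naming which allocation path is taken, nothing more.
def external_malloc_outline(totalsize, small_request_threshold,
                            card_page_indices, has_gcptr_in_varsize,
                            nonlarge_gcptrs_max):
    # Small objects come from the ArenaCollection and are zero-filled there.
    if totalsize <= small_request_threshold:
        return "ac.malloc(totalsize) + raw_memclear"
    # Large objects use arena_malloc(); card-mark bytes are reserved in front
    # of the object only for varsized objects with GC pointers in the
    # variable-sized part.
    if (card_page_indices <= 0 or not has_gcptr_in_varsize
            or totalsize <= nonlarge_gcptrs_max):
        return "arena_malloc(totalsize), no card-mark header"
    return "arena_malloc(card-mark words + totalsize), GCFLAG_HAS_CARDS"
# Callers such as malloc_fixedsize_nonmovable() above no longer need to know
# about any of this: they just call external_malloc(typeid, 0).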
@@ -1057,24 +1027,27 @@ self.old_objects_pointing_to_young.append(newobj) - def _alloc_out_of_nursery(self, totalsize): + def _malloc_out_of_nursery(self, totalsize): """Allocate non-movable memory for an object of the given 'totalsize' that lives so far in the nursery.""" - if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - # for nursery objects that are not small - arena = llarena.arena_malloc(llmemory.raw_malloc_usage(totalsize), - False) - if not arena: - raise MemoryError("cannot allocate object") - llarena.arena_reserve(arena, totalsize) - # - size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_objects.append(arena + size_gc_header) - return arena - else: + if raw_malloc_usage(totalsize) <= self.small_request_threshold: # most common path return self.ac.malloc(totalsize) + else: + # for nursery objects that are not small + return self._malloc_out_of_nursery_nonsmall(totalsize) + _malloc_out_of_nursery._always_inline_ = True + + def _malloc_out_of_nursery_nonsmall(self, totalsize): + arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False) + if not arena: + raise MemoryError("cannot allocate object") + llarena.arena_reserve(arena, totalsize) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(arena + size_gc_header) + return arena # ---------- @@ -1201,7 +1174,7 @@ self.rawmalloced_objects.append(obj) else: totalsize = size_gc_header + self.get_size(obj) - rawtotalsize = llmemory.raw_malloc_usage(totalsize) + rawtotalsize = raw_malloc_usage(totalsize) self.rawmalloced_total_size -= rawtotalsize arena = llarena.getfakearenaaddress(obj - size_gc_header) # @@ -1302,8 +1275,8 @@ else: size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) - shadowhdr = self._alloc_out_of_nursery(size_gc_header + - size) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) # initialize to an invalid tid *without* GCFLAG_VISITED, # so that if the object dies before the next minor # collection, the shadow will stay around but be collected @@ -1497,7 +1470,7 @@ self.total_memory_used = 0 def malloc(self, size): - nsize = llmemory.raw_malloc_usage(size) + nsize = raw_malloc_usage(size) ll_assert(nsize > 0, "malloc: size is null or negative") ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") From arigo at codespeak.net Sun Sep 26 13:56:58 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 13:56:58 +0200 (CEST) Subject: [pypy-svn] r77376 - in pypy/branch/jit-str/pypy/jit: codewriter codewriter/test metainterp metainterp/optimizeopt metainterp/test Message-ID: <20100926115658.A1E0E282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 13:56:56 2010 New Revision: 77376 Added: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py (contents, props changed) Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py pypy/branch/jit-str/pypy/jit/codewriter/support.py pypy/branch/jit-str/pypy/jit/codewriter/test/test_support.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-str/pypy/jit/metainterp/resume.py 
pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_resume.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py Log: - Implement a number of variants of string equality, and pick the best one in optimizeopt. - Refactor the string-optimization code in its own file, string.py. - Change in resume.py: the nice two-steps building of objects -- first allocate them all, then fill them all -- no longer works with strings, because we need to build the strings in a specific order (e.g. a string concatenation needs to know the two concatenated strings' lengths before it can even allocate the result). Rely on recursion instead. Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Sun Sep 26 13:56:56 2010 @@ -19,9 +19,18 @@ OS_NONE = 0 # normal case, no oopspec OS_ARRAYCOPY = 1 # "list.ll_arraycopy" OS_STR_CONCAT = 2 # "stroruni.concat" - OS_STR_SLICE = 3 # "stroruni.slice" - OS_UNI_CONCAT = 4 # "stroruni.concat" + OS_UNI_CONCAT = 3 # "stroruni.concat" + OS_STR_SLICE = 4 # "stroruni.slice" OS_UNI_SLICE = 5 # "stroruni.slice" + OS_STR_EQUAL = 6 # "stroruni.equal" + OS_UNI_EQUAL = 7 # "stroruni.equal" + OS_STREQ_SLICE_CHECKNULL = 8 # s2!=NULL and s1[x:x+length]==s2 + OS_STREQ_SLICE_NONNULL = 9 # s1[x:x+length]==s2 (assert s2!=NULL) + OS_STREQ_SLICE_CHAR = 10 # s1[x:x+length]==char + OS_STREQ_NONNULL = 11 # s1 == s2 (assert s1!=NULL,s2!=NULL) + OS_STREQ_NONNULL_CHAR = 12 # s1 == char (assert s1!=NULL) + OS_STREQ_CHECKNULL_CHAR = 13 # s1!=NULL and s1==char + OS_STREQ_LENGTHOK = 14 # s1 == s2 (assert len(s1)==len(s2)) def __new__(cls, readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, @@ -135,11 +144,18 @@ assert calldescr is not None return calldescr, func -def funcptr_for_oopspec(oopspecindex): - """A memo function that returns a pointer to the function described - by OS_XYZ (as a real low-level function pointer).""" + +def _funcptr_for_oopspec_memo(oopspecindex): from pypy.jit.codewriter import heaptracker _, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0)) funcadr = heaptracker.int2adr(func_as_int) return funcadr.ptr -funcptr_for_oopspec._annspecialcase_ = 'specialize:memo' +_funcptr_for_oopspec_memo._annspecialcase_ = 'specialize:memo' + +def funcptr_for_oopspec(oopspecindex): + """A memo function that returns a pointer to the function described + by OS_XYZ (as a real low-level function pointer).""" + funcptr = _funcptr_for_oopspec_memo(oopspecindex) + assert funcptr + return funcptr +funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(0)' Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Sun Sep 26 13:56:56 2010 @@ -12,6 +12,7 @@ from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj +from pypy.translator.unsimplify import varoftype def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): @@ -1042,15 +1043,70 @@ op1 = [op1, SpaceOperation('-live-', [], None)] 
return op1 + def _register_extra_helper(self, oopspecindex, oopspec_name, + argtypes, resulttype): + # a bit hackish + if oopspecindex in _callinfo_for_oopspec: + return + c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper, + oopspec_name, argtypes, + resulttype) + op = SpaceOperation('pseudo_call', + [c_func] + [varoftype(T) for T in argtypes], + varoftype(resulttype)) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + _callinfo_for_oopspec[oopspecindex] = calldescr, c_func.value + def _handle_stroruni_call(self, op, oopspec_name, args): if args[0].concretetype.TO == rstr.STR: dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, - "stroruni.slice": EffectInfo.OS_STR_SLICE} + "stroruni.slice": EffectInfo.OS_STR_SLICE, + "stroruni.equal": EffectInfo.OS_STR_EQUAL, + } elif args[0].concretetype.TO == rstr.UNICODE: dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, - "stroruni.slice": EffectInfo.OS_UNI_SLICE} + "stroruni.slice": EffectInfo.OS_UNI_SLICE, + "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + } else: assert 0, "args[0].concretetype must be STR or UNICODE" + # + if oopspec_name == "stroruni.equal": + SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE) + for otherindex, othername, argtypes, resulttype in [ + + (EffectInfo.OS_STREQ_SLICE_CHECKNULL, + "str.eq_slice_checknull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_NONNULL, + "str.eq_slice_nonnull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_CHAR, + "str.eq_slice_char", + [SoU, lltype.Signed, lltype.Signed, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL, + "str.eq_nonnull", + [SoU, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL_CHAR, + "str.eq_nonnull_char", + [SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_CHECKNULL_CHAR, + "str.eq_checknull_char", + [SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_LENGTHOK, + "str.eq_lengthok", + [SoU, SoU], + lltype.Signed), + ]: + self._register_extra_helper(otherindex, othername, + argtypes, resulttype) + # return self._handle_oopspec_call(op, args, dict[oopspec_name]) # ---------- Modified: pypy/branch/jit-str/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/support.py Sun Sep 26 13:56:56 2010 @@ -277,6 +277,85 @@ _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode + def _ll_4_str_eq_slice_checknull(s1, start, length, s2): + """str1[start : start + length] == str2.""" + if not s2: + return 0 + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_nonnull(s1, start, length, s2): + """str1[start : start + length] == str2, assuming str2 != NULL.""" + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_char(s1, start, length, c2): + """str1[start : start + length] == c2.""" + if length != 1: + return 0 + if s1.chars[start] != c2: + return 0 + return 1 + + def _ll_2_str_eq_nonnull(s1, s2): + len1 = len(s1.chars) + len2 = len(s2.chars) + if len1 != len2: + return 0 + j = 0 + chars1 = s1.chars + chars2 = s2.chars + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 
1 + return 1 + + def _ll_2_str_eq_nonnull_char(s1, c2): + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_checknull_char(s1, c2): + if not s1: + return 0 + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_lengthok(s1, s2): + j = 0 + chars1 = s1.chars + chars2 = s2.chars + len1 = len(chars1) + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 1 + return 1 + # ---------- malloc with del ---------- def _ll_2_raw_malloc(TP, size): Modified: pypy/branch/jit-str/pypy/jit/codewriter/test/test_support.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/test/test_support.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/test/test_support.py Sun Sep 26 13:56:56 2010 @@ -1,7 +1,8 @@ import py from pypy.rpython.lltypesystem import lltype +from pypy.rpython.annlowlevel import llstr from pypy.objspace.flow.model import Variable, Constant, SpaceOperation -from pypy.jit.codewriter.support import decode_builtin_call +from pypy.jit.codewriter.support import decode_builtin_call, LLtypeHelpers def newconst(x): return Constant(x, lltype.typeOf(x)) @@ -65,3 +66,70 @@ assert opargs == [newconst(myarray), newconst(2), vc, vi] #impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A)) #assert impl(myarray, 2, 'A', 5) == 42 * ord('A') + +def test_streq_slice_checknull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_checknull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + assert func(p1, 2, 1, llstr(None)) == False + +def test_streq_slice_nonnull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_nonnull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + py.test.raises(AttributeError, func, p1, 2, 1, llstr(None)) + +def test_streq_slice_char(): + p1 = llstr("hello world") + func = LLtypeHelpers._ll_4_str_eq_slice_char.im_func + assert func(p1, 6, 3, "w") == False + assert func(p1, 6, 0, "w") == False + assert func(p1, 6, 1, "w") == True + assert func(p1, 6, 1, "x") == False + +def test_streq_nonnull(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_nonnull.im_func + assert func(p1, p1) == True + assert func(p1, p2) == True + assert func(p1, llstr("wrl")) == False + assert func(p1, llstr("world")) == False + assert func(p1, llstr("w")) == False + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) + +def test_streq_nonnull_char(): + func = LLtypeHelpers._ll_2_str_eq_nonnull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + py.test.raises(AttributeError, func, llstr(None), "x") + +def test_streq_checknull_char(): + func = LLtypeHelpers._ll_2_str_eq_checknull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + assert func(llstr(None), "x") == False + +def test_streq_lengthok(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_lengthok.im_func + assert func(p1, p1) == True + 
assert func(p1, p2) == True + assert func(p1, llstr("wrl")) == False + py.test.raises(IndexError, func, p1, llstr("w")) + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/__init__.py Sun Sep 26 13:56:56 2010 @@ -3,6 +3,7 @@ from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap +from pypy.jit.metainterp.optimizeopt.string import OptString def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -13,6 +14,7 @@ optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), + OptString(), OptHeap(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) @@ -23,4 +25,3 @@ expect 'specnodes' on the bridge. """ optimize_loop_1(metainterp_sd, bridge, False) - Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/optimizer.py Sun Sep 26 13:56:56 2010 @@ -9,10 +9,10 @@ from pypy.jit.metainterp.optimizeutil import InvalidLoop, args_dict from pypy.jit.metainterp import resume, compile from pypy.jit.metainterp.typesystem import llhelper, oohelper -from pypy.rpython.lltypesystem import lltype, rstr +from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded -from pypy.rpython import annlowlevel +from pypy.tool.pairtype import extendabletype LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -24,6 +24,7 @@ MININT = -sys.maxint - 1 class OptValue(object): + __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -127,27 +128,6 @@ def setitem(self, index, value): raise NotImplementedError - def getstrlen(self, newoperations): - s = self.get_constant_string() - if s is not None: - return ConstInt(len(s)) - else: - self.ensure_nonnull() - box = self.force_box() - lengthbox = BoxInt() - newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) - return lengthbox - - def get_constant_string(self): - if self.is_constant(): - s = self.box.getref(lltype.Ptr(rstr.STR)) - return annlowlevel.hlstr(s) - else: - return None - - def string_copy_parts(self, *args): - from pypy.jit.metainterp.optimizeopt import virtualize - return virtualize.default_string_copy_parts(self, *args) class ConstantValue(OptValue): def __init__(self, box): @@ -273,6 +253,7 @@ return None def make_equal_to(self, box, value): + assert isinstance(value, OptValue) assert box not in self.values self.values[box] = value Added: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py ============================================================================== --- (empty file) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py Sun Sep 26 13:56:56 2010 @@ -0,0 +1,536 @@ +from pypy.rpython.lltypesystem import lltype, 
rstr, llmemory +from pypy.rpython import annlowlevel +from pypy.jit.metainterp.history import Box, BoxInt, BoxPtr +from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr +from pypy.jit.metainterp.history import get_const_ptr_for_string +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.optimizeopt import optimizer, virtualize +from pypy.jit.metainterp.optimizeopt.optimizer import CONST_0, CONST_1 +from pypy.jit.metainterp.optimizeopt.optimizer import llhelper +from pypy.jit.metainterp.optimizeutil import _findall +from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec +from pypy.jit.codewriter import heaptracker +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import missing_value + + +class __extend__(optimizer.OptValue): + """New methods added to the base class OptValue for this file.""" + + def getstrlen(self, newoperations): + s = self.get_constant_string() + if s is not None: + return ConstInt(len(s)) + else: + if newoperations is None: + return None + self.ensure_nonnull() + box = self.force_box() + lengthbox = BoxInt() + newoperations.append(ResOperation(rop.STRLEN, [box], lengthbox)) + return lengthbox + + def get_constant_string(self): + if self.is_constant(): + s = self.box.getref(lltype.Ptr(rstr.STR)) + return annlowlevel.hlstr(s) + else: + return None + + def string_copy_parts(self, newoperations, targetbox, offsetbox): + # Copies the pointer-to-string 'self' into the target string + # given by 'targetbox', at the specified offset. Returns the offset + # at the end of the copy. + lengthbox = self.getstrlen(newoperations) + srcbox = self.force_box() + return copy_str_content(newoperations, srcbox, targetbox, + CONST_0, offsetbox, lengthbox) + + +class VAbstractStringValue(virtualize.AbstractVirtualValue): + + def _really_force(self): + s = self.get_constant_string() + if s is not None: + c_s = get_const_ptr_for_string(s) + self.make_constant(c_s) + return + assert self.source_op is not None + self.box = box = self.source_op.result + newoperations = self.optimizer.newoperations + lengthbox = self.getstrlen(newoperations) + newoperations.append(ResOperation(rop.NEWSTR, [lengthbox], box)) + self.string_copy_parts(newoperations, box, CONST_0) + + +class VStringPlainValue(VAbstractStringValue): + """A string built with newstr(const).""" + _lengthbox = None # cache only + + def setup(self, size): + self._chars = [optimizer.CVAL_UNINITIALIZED_ZERO] * size + + def setup_slice(self, longerlist, start, stop): + assert 0 <= start <= stop <= len(longerlist) + self._chars = longerlist[start:stop] + + def getstrlen(self, _): + if self._lengthbox is None: + self._lengthbox = ConstInt(len(self._chars)) + return self._lengthbox + + def getitem(self, index): + return self._chars[index] + + def setitem(self, index, charvalue): + assert isinstance(charvalue, optimizer.OptValue) + self._chars[index] = charvalue + + def get_constant_string(self): + for c in self._chars: + if c is optimizer.CVAL_UNINITIALIZED_ZERO or not c.is_constant(): + return None + return ''.join([chr(c.box.getint()) for c in self._chars]) + + def string_copy_parts(self, newoperations, targetbox, offsetbox): + for i in range(len(self._chars)): + charbox = self._chars[i].force_box() + newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) + offsetbox = _int_add(newoperations, offsetbox, CONST_1) + return offsetbox + + def get_args_for_fail(self, modifier): + if self.box is None and not 
modifier.already_seen_virtual(self.keybox): + charboxes = [value.get_key_box() for value in self._chars] + modifier.register_virtual_fields(self.keybox, charboxes) + for value in self._chars: + value.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_vstrplain() + + +class VStringConcatValue(VAbstractStringValue): + """The concatenation of two other strings.""" + + def setup(self, left, right, lengthbox): + self.left = left + self.right = right + self.lengthbox = lengthbox + + def getstrlen(self, _): + return self.lengthbox + + def get_constant_string(self): + s1 = self.left.get_constant_string() + if s1 is None: + return None + s2 = self.right.get_constant_string() + if s2 is None: + return None + return s1 + s2 + + def string_copy_parts(self, newoperations, targetbox, offsetbox): + offsetbox = self.left.string_copy_parts(newoperations, targetbox, + offsetbox) + offsetbox = self.right.string_copy_parts(newoperations, targetbox, + offsetbox) + return offsetbox + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + # we don't store the lengthvalue in guards, because the + # guard-failed code starts with a regular STR_CONCAT again + leftbox = self.left.get_key_box() + rightbox = self.right.get_key_box() + modifier.register_virtual_fields(self.keybox, [leftbox, rightbox]) + self.left.get_args_for_fail(modifier) + self.right.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_vstrconcat() + + +class VStringSliceValue(VAbstractStringValue): + """A slice.""" + vstr = vstart = vlength = missing_value # annotator fix + + def setup(self, vstr, vstart, vlength): + self.vstr = vstr + self.vstart = vstart + self.vlength = vlength + + def getstrlen(self, _): + return self.vlength.force_box() + + def get_constant_string(self): + if self.vstart.is_constant() and self.vlength.is_constant(): + s1 = self.vstr.get_constant_string() + if s1 is None: + return None + start = self.vstart.box.getint() + length = self.vlength.box.getint() + return s1[start : start + length] + return None + + def string_copy_parts(self, newoperations, targetbox, offsetbox): + lengthbox = self.getstrlen(newoperations) + return copy_str_content(newoperations, + self.vstr.force_box(), targetbox, + self.vstart.force_box(), offsetbox, + lengthbox) + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + boxes = [self.vstr.get_key_box(), + self.vstart.get_key_box(), + self.vlength.get_key_box()] + modifier.register_virtual_fields(self.keybox, boxes) + self.vstr.get_args_for_fail(modifier) + self.vstart.get_args_for_fail(modifier) + self.vlength.get_args_for_fail(modifier) + + def _make_virtual(self, modifier): + return modifier.make_vstrslice() + + +def copy_str_content(newoperations, srcbox, targetbox, + srcoffsetbox, offsetbox, lengthbox): + if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): + M = 5 + else: + M = 2 + if isinstance(lengthbox, ConstInt) and lengthbox.value <= M: + # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM + # instead of just a COPYSTRCONTENT. 
+ for i in range(lengthbox.value): + charbox = _strgetitem(newoperations, srcbox, srcoffsetbox) + srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) + newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) + offsetbox = _int_add(newoperations, offsetbox, CONST_1) + else: + nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + op = ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, + srcoffsetbox, offsetbox, + lengthbox], None) + newoperations.append(op) + offsetbox = nextoffsetbox + return offsetbox + +def _int_add(newoperations, box1, box2): + if isinstance(box1, ConstInt): + if box1.value == 0: + return box2 + if isinstance(box2, ConstInt): + return ConstInt(box1.value + box2.value) + elif isinstance(box2, ConstInt) and box2.value == 0: + return box1 + resbox = BoxInt() + newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + return resbox + +def _int_sub(newoperations, box1, box2): + if isinstance(box2, ConstInt): + if box2.value == 0: + return box1 + if isinstance(box1, ConstInt): + return ConstInt(box1.value - box2.value) + resbox = BoxInt() + newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + return resbox + +def _strgetitem(newoperations, strbox, indexbox): + if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): + s = strbox.getref(lltype.Ptr(rstr.STR)) + return ConstInt(ord(s.chars[indexbox.getint()])) + resbox = BoxInt() + newoperations.append(ResOperation(rop.STRGETITEM, [strbox, indexbox], + resbox)) + return resbox + + +class OptString(optimizer.Optimization): + "Handling of strings and unicodes." + + def make_vstring_plain(self, box, source_op=None): + vvalue = VStringPlainValue(self.optimizer, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def make_vstring_concat(self, box, source_op=None): + vvalue = VStringConcatValue(self.optimizer, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def make_vstring_slice(self, box, source_op=None): + vvalue = VStringSliceValue(self.optimizer, box, source_op) + self.make_equal_to(box, vvalue) + return vvalue + + def optimize_CALL(self, op): + # dispatch based on 'oopspecindex' to a method that handles + # specifically the given oopspec call. For non-oopspec calls, + # oopspecindex is just zero. 
+ effectinfo = op.descr.get_extra_info() + if effectinfo is not None: + oopspecindex = effectinfo.oopspecindex + for value, meth in opt_call_oopspec_ops: + if oopspecindex == value: + if meth(self, op): + return + self.emit_operation(op) + + def opt_call_oopspec_ARRAYCOPY(self, op): + source_value = self.getvalue(op.args[1]) + dest_value = self.getvalue(op.args[2]) + source_start_box = self.get_constant_box(op.args[3]) + dest_start_box = self.get_constant_box(op.args[4]) + length = self.get_constant_box(op.args[5]) + if (source_value.is_virtual() and source_start_box and dest_start_box + and length and dest_value.is_virtual()): + # XXX optimize the case where dest value is not virtual, + # but we still can avoid a mess + source_start = source_start_box.getint() + dest_start = dest_start_box.getint() + for index in range(length.getint()): + val = source_value.getitem(index + source_start) + dest_value.setitem(index + dest_start, val) + return True + if length and length.getint() == 0: + return True # 0-length arraycopy + return False + + def optimize_NEWSTR(self, op): + length_box = self.get_constant_box(op.args[0]) + if length_box: + # if the original 'op' did not have a ConstInt as argument, + # build a new one with the ConstInt argument + if not isinstance(op.args[0], ConstInt): + op = ResOperation(rop.NEWSTR, [length_box], op.result) + vvalue = self.make_vstring_plain(op.result, op) + vvalue.setup(length_box.getint()) + else: + self.getvalue(op.result).ensure_nonnull() + self.emit_operation(op) + + def optimize_STRSETITEM(self, op): + value = self.getvalue(op.args[0]) + if value.is_virtual() and isinstance(value, VStringPlainValue): + indexbox = self.get_constant_box(op.args[1]) + if indexbox is not None: + value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + return + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_STRGETITEM(self, op): + value = self.getvalue(op.args[0]) + vindex = self.getvalue(op.args[1]) + vresult = self.strgetitem(value, vindex) + self.make_equal_to(op.result, vresult) + + def strgetitem(self, value, vindex): + value.ensure_nonnull() + # + if value.is_virtual() and isinstance(value, VStringSliceValue): + fullindexbox = _int_add(self.optimizer.newoperations, + value.vstart.force_box(), + vindex.force_box()) + value = value.vstr + vindex = self.getvalue(fullindexbox) + # + if isinstance(value, VStringPlainValue): # even if no longer virtual + if vindex.is_constant(): + return value.getitem(vindex.box.getint()) + # + resbox = _strgetitem(self.optimizer.newoperations, + value.force_box(),vindex.force_box()) + return self.getvalue(resbox) + + def optimize_STRLEN(self, op): + value = self.getvalue(op.args[0]) + lengthbox = value.getstrlen(self.optimizer.newoperations) + self.make_equal_to(op.result, self.getvalue(lengthbox)) + + def opt_call_oopspec_STR_CONCAT(self, op): + vleft = self.getvalue(op.args[1]) + vright = self.getvalue(op.args[2]) + vleft.ensure_nonnull() + vright.ensure_nonnull() + newoperations = self.optimizer.newoperations + len1box = vleft.getstrlen(newoperations) + len2box = vright.getstrlen(newoperations) + lengthbox = _int_add(newoperations, len1box, len2box) + value = self.make_vstring_concat(op.result, op) + value.setup(vleft, vright, lengthbox) + return True + + def opt_call_oopspec_STR_SLICE(self, op): + newoperations = self.optimizer.newoperations + vstr = self.getvalue(op.args[1]) + vstart = self.getvalue(op.args[2]) + vstop = self.getvalue(op.args[3]) + # + if (isinstance(vstr, VStringPlainValue) and 
vstart.is_constant() + and vstop.is_constant()): + # slicing with constant bounds of a VStringPlainValue + value = self.make_vstring_plain(op.result, op) + value.setup_slice(vstr._chars, vstart.box.getint(), + vstop.box.getint()) + return True + # + vstr.ensure_nonnull() + if isinstance(vstr, VStringSliceValue): + # double slicing s[i:j][k:l] + vintermediate = vstr + vstr = vintermediate.vstr + startbox = _int_add(newoperations, + vintermediate.vstart.force_box(), + vstart.force_box()) + vstart = self.getvalue(startbox) + # + lengthbox = _int_sub(newoperations, vstop.force_box(), + vstart.force_box()) + value = self.make_vstring_slice(op.result, op) + value.setup(vstr, vstart, self.getvalue(lengthbox)) + return True + + def opt_call_oopspec_STR_EQUAL(self, op): + v1 = self.getvalue(op.args[1]) + v2 = self.getvalue(op.args[2]) + # + l1box = v1.getstrlen(None) + l2box = v2.getstrlen(None) + if (l1box is not None and l2box is not None and + isinstance(l1box, ConstInt) and + isinstance(l2box, ConstInt) and + l1box.value != l2box.value): + # statically known to have a different length + self.make_constant(op.result, CONST_0) + return True + # + if self.handle_str_equal_level1(v1, v2, op.result): + return True + if self.handle_str_equal_level1(v2, v1, op.result): + return True + if self.handle_str_equal_level2(v1, v2, op.result): + return True + if self.handle_str_equal_level2(v2, v1, op.result): + return True + # + if v1.is_nonnull() and v2.is_nonnull(): + if l1box is not None and l2box is not None and ( + l1box == l2box or (isinstance(l1box, ConstInt) and + isinstance(l2box, ConstInt) and + l1box.value == l2box.value)): + do = EffectInfo.OS_STREQ_LENGTHOK + else: + do = EffectInfo.OS_STREQ_NONNULL + self.generate_modified_call(do, [v1.force_box(), + v2.force_box()], op.result) + return True + return False + + def handle_str_equal_level1(self, v1, v2, resultbox): + l2box = v2.getstrlen(None) + if isinstance(l2box, ConstInt): + if l2box.value == 0: + lengthbox = v1.getstrlen(self.optimizer.newoperations) + seo = self.optimizer.send_extra_operation + seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) + return True + if l2box.value == 1: + l1box = v1.getstrlen(None) + if isinstance(l1box, ConstInt) and l1box.value == 1: + # comparing two single chars + vchar1 = self.strgetitem(v1, optimizer.CVAL_ZERO) + vchar2 = self.strgetitem(v2, optimizer.CVAL_ZERO) + seo = self.optimizer.send_extra_operation + seo(ResOperation(rop.INT_EQ, [vchar1.force_box(), + vchar2.force_box()], + resultbox)) + return True + if isinstance(v1, VStringSliceValue): + vchar = self.strgetitem(v2, optimizer.CVAL_ZERO) + do = EffectInfo.OS_STREQ_SLICE_CHAR + self.generate_modified_call(do, [v1.vstr.force_box(), + v1.vstart.force_box(), + v1.vlength.force_box(), + vchar.force_box()], + resultbox) + return True + # + if v2.is_null(): + if v1.is_nonnull(): + self.make_constant(resultbox, CONST_0) + return True + if v1.is_null(): + self.make_constant(resultbox, CONST_1) + return True + op = ResOperation(rop.PTR_EQ, [v1.force_box(), + llhelper.CONST_NULL], + resultbox) + self.optimizer.newoperations.append(op) + return True + # + return False + + def handle_str_equal_level2(self, v1, v2, resultbox): + l2box = v2.getstrlen(None) + if isinstance(l2box, ConstInt): + if l2box.value == 1: + vchar = self.strgetitem(v2, optimizer.CVAL_ZERO) + if v1.is_nonnull(): + do = EffectInfo.OS_STREQ_NONNULL_CHAR + else: + do = EffectInfo.OS_STREQ_CHECKNULL_CHAR + self.generate_modified_call(do, [v1.force_box(), + vchar.force_box()], 
resultbox) + return True + # + if v1.is_virtual() and isinstance(v1, VStringSliceValue): + if v2.is_nonnull(): + do = EffectInfo.OS_STREQ_SLICE_NONNULL + else: + do = EffectInfo.OS_STREQ_SLICE_CHECKNULL + self.generate_modified_call(do, [v1.vstr.force_box(), + v1.vstart.force_box(), + v1.vlength.force_box(), + v2.force_box()], resultbox) + return True + return False + + def generate_modified_call(self, oopspecindex, args, result): + calldescr, func = callinfo_for_oopspec(oopspecindex) + func = llmemory.cast_ptr_to_adr(func) + func = heaptracker.adr2int(func) + op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, + descr=calldescr) + self.optimizer.newoperations.append(op) + generate_modified_call._annspecialcase_ = 'specialize:arg(1)' + + def propagate_forward(self, op): + opnum = op.opnum + for value, func in optimize_ops: + if opnum == value: + func(self, op) + break + else: + self.emit_operation(op) + +optimize_ops = _findall(OptString, 'optimize_') + +def _findall_call_oopspec(): + prefix = 'opt_call_oopspec_' + result = [] + for name in dir(OptString): + if name.startswith(prefix): + value = getattr(EffectInfo, 'OS_' + name[len(prefix):]) + assert isinstance(value, int) and value != 0 + result.append((value, getattr(OptString, name))) + return unrolling_iterable(result) +opt_call_oopspec_ops = _findall_call_oopspec() Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/virtualize.py Sun Sep 26 13:56:56 2010 @@ -5,11 +5,8 @@ from pypy.jit.metainterp.specnode import VirtualStructSpecNode from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall -from pypy.jit.metainterp.history import get_const_ptr_for_string from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt.optimizer import * -from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.rlib.unroll import unrolling_iterable class AbstractVirtualValue(OptValue): @@ -197,217 +194,6 @@ return modifier.make_varray(self.arraydescr) -class VAbstractStringValue(AbstractVirtualValue): - - def _really_force(self): - s = self.get_constant_string() - if s is not None: - c_s = get_const_ptr_for_string(s) - self.make_constant(c_s) - return - assert self.source_op is not None - self.box = box = self.source_op.result - newoperations = self.optimizer.newoperations - lengthbox = self.getstrlen(newoperations) - newoperations.append(ResOperation(rop.NEWSTR, [lengthbox], box)) - self.string_copy_parts(newoperations, box, CONST_0) - - -class VStringPlainValue(VAbstractStringValue): - """A string built with newstr(const).""" - _lengthbox = None # cache only - - def setup(self, size): - self._chars = [CVAL_UNINITIALIZED_ZERO] * size - - def getstrlen(self, _): - if self._lengthbox is None: - self._lengthbox = ConstInt(len(self._chars)) - return self._lengthbox - - def getitem(self, index): - return self._chars[index] - - def setitem(self, index, charvalue): - assert isinstance(charvalue, OptValue) - self._chars[index] = charvalue - - def get_constant_string(self): - for c in self._chars: - if c is CVAL_UNINITIALIZED_ZERO or not c.is_constant(): - return None - return ''.join([chr(c.box.getint()) for c in self._chars]) - - def string_copy_parts(self, newoperations, targetbox, offsetbox): - for i in 
range(len(self._chars)): - charbox = self._chars[i].force_box() - newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, - offsetbox, - charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) - return offsetbox - - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [value.get_key_box() for value in self._chars] - modifier.register_virtual_fields(self.keybox, charboxes) - for value in self._chars: - value.get_args_for_fail(modifier) - - def _make_virtual(self, modifier): - return modifier.make_vstrplain() - - -class VStringConcatValue(VAbstractStringValue): - """The concatenation of two other strings.""" - - def setup(self, left, right, lengthbox): - self.left = left - self.right = right - self.lengthbox = lengthbox - - def getstrlen(self, _): - return self.lengthbox - - def get_constant_string(self): - s1 = self.left.get_constant_string() - if s1 is None: - return None - s2 = self.right.get_constant_string() - if s2 is None: - return None - return s1 + s2 - - def string_copy_parts(self, newoperations, targetbox, offsetbox): - offsetbox = self.left.string_copy_parts(newoperations, targetbox, - offsetbox) - offsetbox = self.right.string_copy_parts(newoperations, targetbox, - offsetbox) - return offsetbox - - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - # we don't store the lengthvalue in guards, because the - # guard-failed code starts with a regular STR_CONCAT again - leftbox = self.left.get_key_box() - rightbox = self.right.get_key_box() - modifier.register_virtual_fields(self.keybox, [leftbox, rightbox]) - self.left.get_args_for_fail(modifier) - self.right.get_args_for_fail(modifier) - - def _make_virtual(self, modifier): - return modifier.make_vstrconcat() - - -class VStringSliceValue(VAbstractStringValue): - """A slice.""" - - def setup(self, vstr, vstart, vlength): - self.vstr = vstr - self.vstart = vstart - self.vlength = vlength - - def getstrlen(self, newoperations): - return self.vlength.force_box() - - def get_constant_string(self): - if self.vstart.is_constant() and self.vlength.is_constant(): - s1 = self.vstr.get_constant_string() - if s1 is None: - return None - start = self.vstart.box.getint() - length = self.vlength.box.getint() - return s1[start : start + length] - return None - - def string_copy_parts(self, newoperations, targetbox, offsetbox): - lengthbox = self.getstrlen(newoperations) - return copy_str_content(newoperations, - self.vstr.force_box(), targetbox, - self.vstart.force_box(), offsetbox, - lengthbox) - - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - boxes = [self.vstr.get_key_box(), - self.vstart.get_key_box(), - self.vlength.get_key_box()] - modifier.register_virtual_fields(self.keybox, boxes) - self.vstr.get_args_for_fail(modifier) - self.vstart.get_args_for_fail(modifier) - self.vlength.get_args_for_fail(modifier) - - def _make_virtual(self, modifier): - return modifier.make_vstrslice() - - -def default_string_copy_parts(srcvalue, newoperations, targetbox, offsetbox): - # Copies the pointer-to-string 'srcvalue' into the target string - # given by 'targetbox', at the specified offset. Returns the offset - # at the end of the copy. 
- lengthbox = srcvalue.getstrlen(newoperations) - srcbox = srcvalue.force_box() - return copy_str_content(newoperations, srcbox, targetbox, - CONST_0, offsetbox, lengthbox) - -def copy_str_content(newoperations, srcbox, targetbox, - srcoffsetbox, offsetbox, lengthbox): - if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): - M = 5 - else: - M = 2 - if isinstance(lengthbox, ConstInt) and lengthbox.value <= M: - # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM - # instead of just a COPYSTRCONTENT. - for i in range(lengthbox.value): - charbox = _strgetitem(newoperations, srcbox, srcoffsetbox) - srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) - newoperations.append(ResOperation(rop.STRSETITEM, [targetbox, - offsetbox, - charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) - else: - nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) - op = ResOperation(rop.COPYSTRCONTENT, [srcbox, targetbox, - srcoffsetbox, offsetbox, - lengthbox], None) - newoperations.append(op) - offsetbox = nextoffsetbox - return offsetbox - -def _int_add(newoperations, box1, box2): - if isinstance(box1, ConstInt): - if box1.value == 0: - return box2 - if isinstance(box2, ConstInt): - return ConstInt(box1.value + box2.value) - elif isinstance(box2, ConstInt) and box2.value == 0: - return box1 - resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) - return resbox - -def _int_sub(newoperations, box1, box2): - if isinstance(box2, ConstInt): - if box2.value == 0: - return box1 - if isinstance(box1, ConstInt): - return ConstInt(box1.value - box2.value) - resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) - return resbox - -def _strgetitem(newoperations, strbox, indexbox): - # hum, this repetition of the operations is not quite right - if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): - s = strbox.getref(lltype.Ptr(rstr.STR)) - return ConstInt(ord(s.chars[indexbox.getint()])) - resbox = BoxInt() - newoperations.append(ResOperation(rop.STRGETITEM, [strbox, indexbox], - resbox)) - return resbox - - class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -496,21 +282,6 @@ self.make_equal_to(box, vvalue) return vvalue - def make_vstring_plain(self, box, source_op=None): - vvalue = VStringPlainValue(self.optimizer, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - - def make_vstring_concat(self, box, source_op=None): - vvalue = VStringConcatValue(self.optimizer, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - - def make_vstring_slice(self, box, source_op=None): - vvalue = VStringSliceValue(self.optimizer, box, source_op) - self.make_equal_to(box, vvalue) - return vvalue - def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] @@ -649,113 +420,6 @@ ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) - def optimize_CALL(self, op): - # dispatch based on 'oopspecindex' to a method that handles - # specifically the given oopspec call. For non-oopspec calls, - # oopspecindex is just zero. 
- effectinfo = op.descr.get_extra_info() - if effectinfo is not None: - oopspecindex = effectinfo.oopspecindex - for value, meth in opt_call_oopspec_ops: - if oopspecindex == value: - if meth(self, op): - return - self.emit_operation(op) - - def opt_call_oopspec_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[1]) - dest_value = self.getvalue(op.args[2]) - source_start_box = self.get_constant_box(op.args[3]) - dest_start_box = self.get_constant_box(op.args[4]) - length = self.get_constant_box(op.args[5]) - if (source_value.is_virtual() and source_start_box and dest_start_box - and length and dest_value.is_virtual()): - # XXX optimize the case where dest value is not virtual, - # but we still can avoid a mess - source_start = source_start_box.getint() - dest_start = dest_start_box.getint() - for index in range(length.getint()): - val = source_value.getitem(index + source_start) - dest_value.setitem(index + dest_start, val) - return True - if length and length.getint() == 0: - return True # 0-length arraycopy - return False - - def optimize_NEWSTR(self, op): - length_box = self.get_constant_box(op.args[0]) - if length_box: - # if the original 'op' did not have a ConstInt as argument, - # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): - op = ResOperation(rop.NEWSTR, [length_box], op.result) - vvalue = self.make_vstring_plain(op.result, op) - vvalue.setup(length_box.getint()) - else: - self.getvalue(op.result).ensure_nonnull() - self.emit_operation(op) - - def optimize_STRSETITEM(self, op): - value = self.getvalue(op.args[0]) - if value.is_virtual() and isinstance(value, VStringPlainValue): - indexbox = self.get_constant_box(op.args[1]) - if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) - return - value.ensure_nonnull() - self.emit_operation(op) - - def optimize_STRGETITEM(self, op): - value = self.getvalue(op.args[0]) - if isinstance(value, VStringPlainValue): # even if no longer virtual - indexbox = self.get_constant_box(op.args[1]) - if indexbox is not None: - charvalue = value.getitem(indexbox.getint()) - self.make_equal_to(op.result, charvalue) - return - value.ensure_nonnull() - self.emit_operation(op) - - def optimize_STRLEN(self, op): - value = self.getvalue(op.args[0]) - lengthbox = value.getstrlen(self.optimizer.newoperations) - self.make_equal_to(op.result, self.getvalue(lengthbox)) - - def opt_call_oopspec_STR_CONCAT(self, op): - vleft = self.getvalue(op.args[1]) - vright = self.getvalue(op.args[2]) - vleft.ensure_nonnull() - vright.ensure_nonnull() - newoperations = self.optimizer.newoperations - len1box = vleft.getstrlen(newoperations) - len2box = vright.getstrlen(newoperations) - lengthbox = _int_add(newoperations, len1box, len2box) - value = self.make_vstring_concat(op.result, op) - value.setup(vleft, vright, lengthbox) - return True - - def opt_call_oopspec_STR_SLICE(self, op): - newoperations = self.optimizer.newoperations - vstr = self.getvalue(op.args[1]) - vstart = self.getvalue(op.args[2]) - vstop = self.getvalue(op.args[3]) - vstr.ensure_nonnull() - lengthbox = _int_sub(newoperations, vstop.force_box(), - vstart.force_box()) - value = self.make_vstring_slice(op.result, op) - # - if isinstance(vstr, VStringSliceValue): - # double slicing s[i:j][k:l] - vintermediate = vstr - vstr = vintermediate.vstr - startbox = _int_add(newoperations, - vintermediate.vstart.force_box(), - vstart.force_box()) - vstart = self.getvalue(startbox) - # - value.setup(vstr, vstart, self.getvalue(lengthbox)) - 
return True - def propagate_forward(self, op): opnum = op.opnum for value, func in optimize_ops: @@ -766,14 +430,3 @@ self.emit_operation(op) optimize_ops = _findall(OptVirtualize, 'optimize_') - -def _findall_call_oopspec(): - prefix = 'opt_call_oopspec_' - result = [] - for name in dir(OptVirtualize): - if name.startswith(prefix): - value = getattr(EffectInfo, 'OS_' + name[len(prefix):]) - assert isinstance(value, int) and value != 0 - result.append((value, getattr(OptVirtualize, name))) - return unrolling_iterable(result) -opt_call_oopspec_ops = _findall_call_oopspec() Modified: pypy/branch/jit-str/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/resume.py Sun Sep 26 13:56:56 2010 @@ -9,7 +9,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr from pypy.rlib import rarithmetic from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.rlib.debug import have_debug_prints +from pypy.rlib.debug import have_debug_prints, ll_assert from pypy.rlib.debug import debug_start, debug_stop, debug_print # Logic to encode the chain of frames and the state of the boxes at a @@ -408,9 +408,7 @@ class AbstractVirtualInfo(object): - #def allocate(self, metainterp): - # raise NotImplementedError - #def setfields(self, decoder, struct): + #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): return tagged_list_eq(self.fieldnums, fieldnums) @@ -430,6 +428,7 @@ for i in range(len(self.fielddescrs)): descr = self.fielddescrs[i] decoder.setfield(descr, struct, self.fieldnums[i]) + return struct def debug_prints(self): assert len(self.fielddescrs) == len(self.fieldnums) @@ -444,8 +443,10 @@ self.known_class = known_class @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_with_vtable(self.known_class) + def allocate(self, decoder, index): + struct = decoder.allocate_with_vtable(self.known_class) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvirtualinfo", self.known_class.repr_rpython()) @@ -457,8 +458,10 @@ self.typedescr = typedescr @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_struct(self.typedescr) + def allocate(self, decoder, index): + struct = decoder.allocate_struct(self.typedescr) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvstructinfo", self.typedescr.repr_rpython()) @@ -470,14 +473,11 @@ #self.fieldnums = ... @specialize.argtype(1) - def allocate(self, decoder): + def allocate(self, decoder, index): length = len(self.fieldnums) - return decoder.allocate_array(self.arraydescr, length) - - @specialize.argtype(1) - def setfields(self, decoder, array): arraydescr = self.arraydescr - length = len(self.fieldnums) + array = decoder.allocate_array(arraydescr, length) + decoder.virtuals_cache[index] = array # NB. 
the check for the kind of array elements is moved out of the loop if arraydescr.is_array_of_pointers(): for i in range(length): @@ -491,25 +491,25 @@ for i in range(length): decoder.setarrayitem_int(arraydescr, array, i, self.fieldnums[i]) + return array def debug_prints(self): debug_print("\tvarrayinfo", self.arraydescr) for i in self.fieldnums: debug_print("\t\t", str(untag(i))) + class VStrPlainInfo(AbstractVirtualInfo): """Stands for the string made out of the characters of all fieldnums.""" @specialize.argtype(1) - def allocate(self, decoder): - length = len(self.fieldnums) - return decoder.allocate_string(length) - - @specialize.argtype(1) - def setfields(self, decoder, string): + def allocate(self, decoder, index): length = len(self.fieldnums) + string = decoder.allocate_string(length) + decoder.virtuals_cache[index] = string for i in range(length): - decoder.strsetitem(string, i, self.fieldnums[i]) + decoder.string_setitem(string, i, self.fieldnums[i]) + return string def debug_prints(self): debug_print("\tvstrplaininfo length", len(self.fieldnums)) @@ -520,18 +520,14 @@ other strings.""" @specialize.argtype(1) - def allocate(self, decoder): + def allocate(self, decoder, index): # xxx for blackhole resuming, this will build all intermediate # strings and throw them away immediately, which is a bit sub- # efficient. Not sure we care. left, right = self.fieldnums - return decoder.concat_strings(left, right) - - @specialize.argtype(1) - def setfields(self, decoder, string): - # we do everything in allocate(); no risk of circular data structure - # with strings. - pass + string = decoder.concat_strings(left, right) + decoder.virtuals_cache[index] = string + return string def debug_prints(self): debug_print("\tvstrconcatinfo") @@ -543,15 +539,11 @@ """Stands for the string made out of slicing another string.""" @specialize.argtype(1) - def allocate(self, decoder): - str, start, length = self.fieldnums - return decoder.slice_string(str, start, length) - - @specialize.argtype(1) - def setfields(self, decoder, string): - # we do everything in allocate(); no risk of circular data structure - # with strings. - pass + def allocate(self, decoder, index): + largerstr, start, length = self.fieldnums + string = decoder.slice_string(largerstr, start, length) + decoder.virtuals_cache[index] = string + return string def debug_prints(self): debug_print("\tvstrsliceinfo") @@ -568,7 +560,8 @@ blackholing and want the best performance. """ _mixin_ = True - virtuals = None + rd_virtuals = None + virtuals_cache = None virtual_default = None def _init(self, cpu, storage): @@ -580,17 +573,29 @@ self._prepare_virtuals(storage.rd_virtuals) self._prepare_pendingfields(storage.rd_pendingfields) + def getvirtual(self, index): + # Returns the index'th virtual, building it lazily if needed. + # Note that this may be called recursively; that's why the + # allocate() methods must fill in the cache as soon as they + # have the object, before they fill its fields. 
+ v = self.virtuals_cache[index] + if not v: + v = self.rd_virtuals[index].allocate(self, index) + ll_assert(v == self.virtuals_cache[index], "resume.py: bad cache") + return v + + def force_all_virtuals(self): + rd_virtuals = self.rd_virtuals + if rd_virtuals: + for i in range(len(rd_virtuals)): + if rd_virtuals[i] is not None: + self.getvirtual(i) + return self.virtuals_cache + def _prepare_virtuals(self, virtuals): if virtuals: - self.virtuals = [self.virtual_default] * len(virtuals) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - self.virtuals[i] = vinfo.allocate(self) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - vinfo.setfields(self, self.virtuals[i]) + self.rd_virtuals = virtuals + self.virtuals_cache = [self.virtual_default] * len(virtuals) def _prepare_pendingfields(self, pendingfields): if pendingfields is not None: @@ -698,6 +703,11 @@ return self.metainterp.execute_and_record(rop.NEWSTR, None, ConstInt(length)) + def string_setitem(self, strbox, index, charnum): + charbox = self.decode_box(charnum, INT) + self.metainterp.execute_and_record(rop.STRSETITEM, None, + strbox, ConstInt(index), charbox) + def concat_strings(self, str1num, str2num): calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) str1box = self.decode_box(str1num, REF) @@ -756,9 +766,7 @@ else: box = self.consts[num] elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - box = virtuals[num] + box = self.getvirtual(num) elif tag == TAGINT: box = ConstInt(num) else: @@ -843,7 +851,7 @@ resumereader.handling_async_forcing() vrefinfo = metainterp_sd.virtualref_info resumereader.consume_vref_and_vable(vrefinfo, vinfo) - return resumereader.virtuals + return resumereader.force_all_virtuals() class ResumeDataDirectReader(AbstractResumeDataReader): unique_id = lambda: None @@ -861,7 +869,9 @@ # special case for resuming after a GUARD_NOT_FORCED: we already # have the virtuals self.resume_after_guard_not_forced = 2 - self.virtuals = all_virtuals + self.virtuals_cache = all_virtuals + # self.rd_virtuals can remain None, because virtuals_cache is + # already filled def handling_async_forcing(self): self.resume_after_guard_not_forced = 1 @@ -935,6 +945,10 @@ def allocate_string(self, length): return self.cpu.bh_newstr(length) + def string_setitem(self, str, index, charnum): + char = self.decode_int(charnum) + self.cpu.bh_strsetitem(str, index, char) + def concat_strings(self, str1num, str2num): str1 = self.decode_ref(str1num) str2 = self.decode_ref(str2num) @@ -995,9 +1009,7 @@ return self.cpu.ts.NULLREF return self.consts[num].getref_base() elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - return virtuals[num] + return self.getvirtual(num) else: assert tag == TAGBOX if num < 0: Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizefindnode.py Sun Sep 26 13:56:56 2010 @@ -123,6 +123,27 @@ EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE)) strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL)) + streq_slice_checknull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHECKNULL)) + 
streq_slice_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_NONNULL)) + streq_slice_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHAR)) + streq_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL)) + streq_nonnull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL_CHAR)) + streq_checknull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_CHECKNULL_CHAR)) + streq_lengthok_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_LENGTHOK)) class LoopToken(AbstractDescr): pass Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Sun Sep 26 13:56:56 2010 @@ -4098,6 +4098,42 @@ """ self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected) + def test_str_slice_getitem1(self): + ops = """ + [p1, i1, i2, i3] + p2 = call(0, p1, i1, i2, descr=slicedescr) + i4 = strgetitem(p2, i3) + escape(i4) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i6 = int_sub(i2, i1) # killed by the backend + i5 = int_add(i1, i3) + i4 = strgetitem(p1, i5) + escape(i4) + jump(p1, i1, i2, i3) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + + def test_str_slice_plain(self): + ops = """ + [i3, i4] + p1 = newstr(2) + strsetitem(p1, 0, i3) + strsetitem(p1, 1, i4) + p2 = call(0, p1, 1, 2, descr=slicedescr) + i5 = strgetitem(p2, 0) + escape(i5) + jump(i3, i4) + """ + expected = """ + [i3, i4] + escape(i4) + jump(i3, i4) + """ + self.optimize_loop(ops, 'Not, Not', expected) + def test_str_slice_concat(self): ops = """ [p1, i1, i2, p2] @@ -4119,6 +4155,345 @@ """ self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + # ---------- + def optimize_loop_extradescrs(self, ops, spectext, optops): + from pypy.jit.metainterp.optimizeopt import string + def my_callinfo_for_oopspec(oopspecindex): + calldescrtype = type(LLtypeMixin.strequaldescr) + for value in LLtypeMixin.__dict__.values(): + if isinstance(value, calldescrtype): + if (value.get_extra_info() and + value.get_extra_info().oopspecindex == oopspecindex): + from pypy.rpython.lltypesystem import lltype + func = lltype.nullptr(lltype.FuncType([], lltype.Void)) + # returns 0 for 'func' in this test + return value, func + raise AssertionError("not found: oopspecindex=%d" % oopspecindex) + # + saved = string.callinfo_for_oopspec + try: + string.callinfo_for_oopspec = my_callinfo_for_oopspec + self.optimize_loop(ops, spectext, optops) + finally: + string.callinfo_for_oopspec = saved + + def test_str_equal_noop1(self): + ops = """ + [p1, p2] + i0 = call(0, p1, p2, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', ops) + + def test_str_equal_noop2(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, p2, p3) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 
= strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, p2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice1(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p4, p3, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i3 = int_sub(i2, i1) + i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice2(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice3(self): + ops = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice4(self): + ops = """ + [p1, i1, i2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, "x", descr=strequaldescr) + escape(i0) + jump(p1, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + i0 = call(0, p1, i1, i3, 120, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice5(self): + ops = """ + [p1, i1, i2, i3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + p5 = newstr(1) + strsetitem(p5, 0, i3) + i0 = call(0, p5, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2, i3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_none1(self): + ops = """ + [p1] + i0 = call(0, p1, NULL, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_none2(self): + ops = """ + [p1] + i0 = call(0, NULL, p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull1(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=streq_nonnull_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull2(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "", descr=strequaldescr) + 
escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i1 = strlen(p1) + i0 = int_eq(i1, 0) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull3(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, 120, descr=streq_nonnull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull4(self): + ops = """ + [p1, p2] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, "hello world", p4, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, "hello world", p4, descr=streq_nonnull_descr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars0(self): + ops = """ + [i1] + p1 = newstr(0) + i0 = call(0, p1, "", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(1) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + i0 = int_eq(i1, 120) # ord('x') + escape(i0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars2(self): + ops = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=strequaldescr) + escape(i0) + jump(i1, i2) + """ + expected = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=streq_lengthok_descr) + escape(i0) + jump(i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars3(self): + ops = """ + [p1] + i0 = call(0, "x", p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = call(0, p1, 120, descr=streq_checknull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_lengthmismatch1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, "xy", p1, descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + # XXX unicode operations + # XXX str2unicode + ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_resume.py Sun Sep 26 13:56:56 2010 @@ -199,10 +199,10 @@ def test_prepare_virtuals(): class FakeVinfo(object): - def allocate(self, decoder): - return "allocated" - def setfields(self, decoder, virtual): - assert virtual == "allocated" + def allocate(self, decoder, index): + s = "allocated" + decoder.virtuals_cache[index] = s + return s class FakeStorage(object): rd_virtuals = [FakeVinfo(), None] rd_numb 
= [] @@ -212,7 +212,97 @@ _already_allocated_resume_virtuals = None cpu = None reader = ResumeDataDirectReader(None, FakeStorage()) - assert reader.virtuals == ["allocated", reader.virtual_default] + assert reader.force_all_virtuals() == ["allocated", reader.virtual_default] + +# ____________________________________________________________ + +class FakeResumeDataReader(AbstractResumeDataReader): + def allocate_with_vtable(self, known_class): + return FakeBuiltObject(vtable=known_class) + def allocate_struct(self, typedescr): + return FakeBuiltObject(typedescr=typedescr) + def allocate_array(self, arraydescr, length): + return FakeBuiltObject(arraydescr=arraydescr, items=[None]*length) + def setfield(self, descr, struct, fieldnum): + setattr(struct, descr, fieldnum) + def setarrayitem_int(self, arraydescr, array, i, fieldnum): + assert 0 <= i < len(array.items) + assert arraydescr is array.arraydescr + array.items[i] = fieldnum + def allocate_string(self, length): + return FakeBuiltObject(string=[None]*length) + def string_setitem(self, string, i, fieldnum): + value, tag = untag(fieldnum) + assert tag == TAGINT + assert 0 <= i < len(string.string) + string.string[i] = value + def concat_strings(self, left, right): + return FakeBuiltObject(strconcat=[left, right]) + def slice_string(self, str, start, length): + return FakeBuiltObject(strslice=[str, start, length]) + +class FakeBuiltObject(object): + def __init__(self, **kwds): + self.__dict__ = kwds + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + def __repr__(self): + return 'FakeBuiltObject(%s)' % ( + ', '.join(['%s=%r' % item for item in self.__dict__.items()])) + +class FakeArrayDescr(object): + def is_array_of_pointers(self): return False + def is_array_of_floats(self): return False + +def test_virtualinfo(): + info = VirtualInfo(123, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(vtable=123, fielddescr1=tag(456, TAGINT))] + +def test_vstructinfo(): + info = VStructInfo(124, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(typedescr=124, fielddescr1=tag(456, TAGINT))] + +def test_varrayinfo(): + arraydescr = FakeArrayDescr() + info = VArrayInfo(arraydescr) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(arraydescr=arraydescr, items=[tag(456, TAGINT)])] + +def test_vstrplaininfo(): + info = VStrPlainInfo() + info.fieldnums = [tag(60, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(string=[60])] + +def test_vstrconcatinfo(): + info = VStrConcatInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strconcat=info.fieldnums)] + +def test_vstrsliceinfo(): + info = VStrSliceInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX), tag(30, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strslice=info.fieldnums)] # ____________________________________________________________ @@ -957,7 
+1047,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 2 + assert len(reader.virtuals_cache) == 2 b2t = reader.decode_ref(modifier._gettagged(b2s)) b4t = reader.decode_ref(modifier._gettagged(b4s)) trace = metainterp.trace @@ -973,9 +1063,9 @@ (rop.SETFIELD_GC, [b4t, b3t], None, LLtypeMixin.valuedescr), (rop.SETFIELD_GC, [b4t, b5t], None, LLtypeMixin.otherdescr)] if untag(modifier._gettagged(b2s))[0] == -2: - expected = [b2new, b4new] + b2set + b4set + expected = [b2new, b4new] + b4set + b2set else: - expected = [b4new, b2new] + b4set + b2set + expected = [b4new, b2new] + b2set + b4set for x, y in zip(expected, trace): assert x == y @@ -1020,7 +1110,7 @@ # resume metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace expected = [ @@ -1065,7 +1155,7 @@ NULL = ConstPtr.value metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace @@ -1112,7 +1202,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert reader.virtuals is None + assert reader.virtuals_cache is None trace = metainterp.trace b2set = (rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr) expected = [b2set] Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_string.py Sun Sep 26 13:56:56 2010 @@ -281,6 +281,24 @@ return 42 self.meta_interp(f, [10, 10]) + def test_streq_char(self): + for somestr in ["?abcdefg", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = somestr[:m] + escape(s == "?") + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, newunicode=0) + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" Modified: pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/warmstate.py Sun Sep 26 13:56:56 2010 @@ -83,7 +83,7 @@ return history.ConstFloat(value) else: return history.BoxFloat(value) - elif isinstance(value, (str, unicode)): + elif isinstance(value, str) or isinstance(value, unicode): assert len(value) == 1 # must be a character value = ord(value) else: From arigo at codespeak.net Sun Sep 26 14:00:53 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 14:00:53 +0200 (CEST) Subject: [pypy-svn] r77377 - pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt Message-ID: <20100926120053.E5011282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 14:00:52 2010 New Revision: 77377 Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py Log: Fix annotation: the _attrs_ forces the attributes to be known, even in examples where we never instantiate the classes. 
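(A minimal illustrative sketch of what the '_attrs_' declaration used in this
fix does; the class and attribute names below are made up and not part of the
diff. In RPython, '_attrs_' lists the instance attributes of a class up front,
so the annotator knows them even when the class is never instantiated in the
annotated program.)

    class Base(object):
        _attrs_ = ()                    # no attributes of its own

    class Slice(Base):
        _attrs_ = ('start', 'length')   # attributes known a priori

        def setup(self, start, length):
            self.start = start
            self.length = length
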
Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py Sun Sep 26 14:00:52 2010 @@ -11,7 +11,6 @@ from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec from pypy.jit.codewriter import heaptracker from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.objectmodel import missing_value class __extend__(optimizer.OptValue): @@ -48,6 +47,7 @@ class VAbstractStringValue(virtualize.AbstractVirtualValue): + _attrs_ = () def _really_force(self): s = self.get_constant_string() @@ -155,7 +155,7 @@ class VStringSliceValue(VAbstractStringValue): """A slice.""" - vstr = vstart = vlength = missing_value # annotator fix + _attrs_ = ('vstr', 'vstart', 'vlength') def setup(self, vstr, vstart, vlength): self.vstr = vstr From arigo at codespeak.net Sun Sep 26 14:02:30 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 14:02:30 +0200 (CEST) Subject: [pypy-svn] r77378 - in pypy/branch/jit-str/pypy/rpython: . lltypesystem Message-ID: <20100926120230.52475282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 14:02:28 2010 New Revision: 77378 Modified: pypy/branch/jit-str/pypy/rpython/annlowlevel.py pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Log: Forgot these changes, necessary to link to the new code. Modified: pypy/branch/jit-str/pypy/rpython/annlowlevel.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/annlowlevel.py (original) +++ pypy/branch/jit-str/pypy/rpython/annlowlevel.py Sun Sep 26 14:02:28 2010 @@ -397,6 +397,8 @@ assert strtype in (str, unicode) def hlstr(ll_s): + if not ll_s: + return None if hasattr(ll_s, 'chars'): if strtype is str: return ''.join(ll_s.chars) @@ -423,9 +425,14 @@ def llstr(s): from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode + from pypy.rpython.lltypesystem.rstr import STR, UNICODE if strtype is str: + if s is None: + return lltype.nullptr(STR) ll_s = mallocstr(len(s)) else: + if s is None: + return lltype.nullptr(UNICODE) ll_s = mallocunicode(len(s)) for i, c in enumerate(s): ll_s.chars[i] = c Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Sun Sep 26 14:02:28 2010 @@ -318,6 +318,7 @@ def ll_strfasthash(s): return s.hash # assumes that the hash is already computed + @purefunction def ll_strconcat(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -443,8 +444,8 @@ if chars1[j] != chars2[j]: return False j += 1 - return True + ll_streq.oopspec = 'stroruni.equal(s1, s2)' @purefunction def ll_startswith(s1, s2): @@ -693,6 +694,7 @@ i += 1 return result + @purefunction def _ll_stringslice(s1, start, stop): newstr = s1.malloc(stop - start) assert start >= 0 From arigo at codespeak.net Sun Sep 26 14:06:35 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 14:06:35 +0200 (CEST) Subject: [pypy-svn] r77379 - pypy/branch/jit-str/pypy/objspace/std Message-ID: <20100926120635.3020D282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 14:06:33 2010 New Revision: 77379 Modified: pypy/branch/jit-str/pypy/objspace/std/stringtype.py Log: Experimental: turn 
off char sharing under the jit. Modified: pypy/branch/jit-str/pypy/objspace/std/stringtype.py ============================================================================== --- pypy/branch/jit-str/pypy/objspace/std/stringtype.py (original) +++ pypy/branch/jit-str/pypy/objspace/std/stringtype.py Sun Sep 26 14:06:33 2010 @@ -4,6 +4,7 @@ from sys import maxint from pypy.rlib.objectmodel import specialize +from pypy.rlib.jit import we_are_jitted def wrapstr(space, s): from pypy.objspace.std.stringobject import W_StringObject @@ -32,7 +33,7 @@ def wrapchar(space, c): from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.ropeobject import rope, W_RopeObject - if space.config.objspace.std.withprebuiltchar: + if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): if space.config.objspace.std.withrope: return W_RopeObject.PREBUILT[ord(c)] return W_StringObject.PREBUILT[ord(c)] From arigo at codespeak.net Sun Sep 26 16:55:20 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 16:55:20 +0200 (CEST) Subject: [pypy-svn] r77380 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100926145520.21F4E282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 16:55:18 2010 New Revision: 77380 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Log: Oups. The max_heap_size was ignored in the initial setting of next_major_collection_threshold. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Sun Sep 26 16:55:18 2010 @@ -277,10 +277,21 @@ # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size # initialize the threshold, a bit arbitrarily - self.next_major_collection_threshold = ( - self.nursery_size * self.major_collection_threshold) + self.set_major_threshold_from(self.nursery_size * + self.major_collection_threshold) debug_stop("gc-set-nursery-size") + def set_major_threshold_from(self, threshold): + # Set the next_major_collection_threshold. + if self.max_heap_size > 0.0 and threshold > self.max_heap_size: + threshold = self.max_heap_size + bounded = True + else: + bounded = False + # + self.next_major_collection_threshold = threshold + return bounded + def malloc_fixedsize_clear(self, typeid, size, can_collect=True, needs_finalizer=False, contains_weakptr=False): @@ -1119,30 +1130,26 @@ # Set the threshold for the next major collection to be when we # have allocated 'major_collection_threshold' times more than # we currently have. - self.next_major_collection_threshold = ( + bounded = self.set_major_threshold_from( (self.get_total_memory_used() * self.major_collection_threshold) + reserving_size) # # Max heap size: gives an upper bound on the threshold. If we # already have at least this much allocated, raise MemoryError. - if (self.max_heap_size > 0.0 and - self.next_major_collection_threshold > self.max_heap_size): + if bounded and (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_threshold): # - self.next_major_collection_threshold = self.max_heap_size - if (float(self.get_total_memory_used()) + reserving_size >= - self.next_major_collection_threshold): - # - # First raise MemoryError, giving the program a chance to - # quit cleanly. 
It might still allocate in the nursery, - # which might eventually be emptied, triggering another - # major collect and (possibly) reaching here again with an - # even higher memory consumption. To prevent it, if it's - # the second time we are here, then abort the program. - if self.max_heap_size_already_raised: - llop.debug_fatalerror(lltype.Void, - "Using too much memory, aborting") - self.max_heap_size_already_raised = True - raise MemoryError + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError # # At the end, we can execute the finalizers of the objects # listed in 'run_finalizers'. Note that this will typically do From arigo at codespeak.net Sun Sep 26 17:14:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 17:14:04 +0200 (CEST) Subject: [pypy-svn] r77381 - pypy/branch/jit-str/pypy/rpython/lltypesystem Message-ID: <20100926151404.E4CBB282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 17:14:03 2010 New Revision: 77381 Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Log: Kill these, which are mostly duplicates of each other. Helps the JIT. Modified: pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/jit-str/pypy/rpython/lltypesystem/rstr.py Sun Sep 26 17:14:03 2010 @@ -696,24 +696,16 @@ @purefunction def _ll_stringslice(s1, start, stop): - newstr = s1.malloc(stop - start) - assert start >= 0 lgt = stop - start + assert start >= 0 assert lgt >= 0 + newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' def ll_stringslice_startonly(s1, start): - len1 = len(s1.chars) - if we_are_jitted(): - return LLHelpers._ll_stringslice(s1, start, len1) - newstr = s1.malloc(len1 - start) - lgt = len1 - start - assert lgt >= 0 - assert start >= 0 - s1.copy_contents(s1, newstr, start, 0, lgt) - return newstr + return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) def ll_stringslice_startstop(s1, start, stop): if we_are_jitted(): @@ -728,12 +720,7 @@ def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 - if we_are_jitted(): - return LLHelpers._ll_stringslice(s1, 0, newlen) - newstr = s1.malloc(newlen) - assert newlen >= 0 - s1.copy_contents(s1, newstr, 0, 0, newlen) - return newstr + return LLHelpers._ll_stringslice(s1, 0, newlen) def ll_split_chr(LIST, s, c): chars = s.chars From arigo at codespeak.net Sun Sep 26 17:15:08 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 17:15:08 +0200 (CEST) Subject: [pypy-svn] r77382 - in pypy/branch/jit-str/pypy/jit: codewriter metainterp/optimizeopt metainterp/test Message-ID: <20100926151508.9B7EE282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 17:15:06 2010 New Revision: 77382 Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py 
pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jit-str/pypy/jit/metainterp/test/test_ztranslation.py Log: Mostly translation fixes. Also undo a reordering in string.py which was buggy (and shown by test_optimizeopt). Modified: pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/effectinfo.py Sun Sep 26 17:15:06 2010 @@ -129,20 +129,14 @@ # ____________________________________________________________ -_callinfo_for_oopspec = {} - -def _callinfo_for_oopspec_memo(oopspecindex): - return _callinfo_for_oopspec.get(oopspecindex, (None, 0)) -_callinfo_for_oopspec_memo._annspecialcase_ = 'specialize:memo' +_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)} def callinfo_for_oopspec(oopspecindex): - """A memo function that returns the calldescr and the function + """A function that returns the calldescr and the function address (as an int) of one of the OS_XYZ functions defined above. Don't use this if there might be several implementations of the same OS_XYZ specialized by type, e.g. OS_ARRAYCOPY.""" - calldescr, func = _callinfo_for_oopspec_memo(oopspecindex) - assert calldescr is not None - return calldescr, func + return _callinfo_for_oopspec[oopspecindex] def _funcptr_for_oopspec_memo(oopspecindex): Modified: pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/jtransform.py Sun Sep 26 17:15:06 2010 @@ -1055,7 +1055,9 @@ [c_func] + [varoftype(T) for T in argtypes], varoftype(resulttype)) calldescr = self.callcontrol.getcalldescr(op, oopspecindex) - _callinfo_for_oopspec[oopspecindex] = calldescr, c_func.value + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(c_func.value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func def _handle_stroruni_call(self, op, oopspec_name, args): if args[0].concretetype.TO == rstr.STR: Modified: pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py Sun Sep 26 17:15:06 2010 @@ -172,6 +172,8 @@ return None start = self.vstart.box.getint() length = self.vlength.box.getint() + assert start >= 0 + assert length >= 0 return s1[start : start + length] return None @@ -384,6 +386,9 @@ return True # vstr.ensure_nonnull() + lengthbox = _int_sub(newoperations, vstop.force_box(), + vstart.force_box()) + # if isinstance(vstr, VStringSliceValue): # double slicing s[i:j][k:l] vintermediate = vstr @@ -393,8 +398,6 @@ vstart.force_box()) vstart = self.getvalue(startbox) # - lengthbox = _int_sub(newoperations, vstop.force_box(), - vstart.force_box()) value = self.make_vstring_slice(op.result, op) value.setup(vstr, vstart, self.getvalue(lengthbox)) return True @@ -506,12 +509,9 @@ def generate_modified_call(self, oopspecindex, args, result): calldescr, func = callinfo_for_oopspec(oopspecindex) - func = llmemory.cast_ptr_to_adr(func) - func = heaptracker.adr2int(func) op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, descr=calldescr) 
self.optimizer.newoperations.append(op) - generate_modified_call._annspecialcase_ = 'specialize:arg(1)' def propagate_forward(self, op): opnum = op.opnum Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_optimizeopt.py Sun Sep 26 17:15:06 2010 @@ -4164,10 +4164,8 @@ if isinstance(value, calldescrtype): if (value.get_extra_info() and value.get_extra_info().oopspecindex == oopspecindex): - from pypy.rpython.lltypesystem import lltype - func = lltype.nullptr(lltype.FuncType([], lltype.Void)) # returns 0 for 'func' in this test - return value, func + return value, 0 raise AssertionError("not found: oopspecindex=%d" % oopspecindex) # saved = string.callinfo_for_oopspec Modified: pypy/branch/jit-str/pypy/jit/metainterp/test/test_ztranslation.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/metainterp/test/test_ztranslation.py (original) +++ pypy/branch/jit-str/pypy/jit/metainterp/test/test_ztranslation.py Sun Sep 26 17:15:06 2010 @@ -21,6 +21,7 @@ # - full optimizer # - jitdriver hooks # - two JITs + # - string concatenation, slicing and comparison class Frame(object): _virtualizable2_ = ['i'] @@ -60,11 +61,15 @@ frame.i -= 1 return total * 10 # - myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x']) + myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x', 's']) def f2(g, m, x): + s = "" while m > 0: - myjitdriver2.can_enter_jit(g=g, m=m, x=x) - myjitdriver2.jit_merge_point(g=g, m=m, x=x) + myjitdriver2.can_enter_jit(g=g, m=m, x=x, s=s) + myjitdriver2.jit_merge_point(g=g, m=m, x=x, s=s) + s += 'xy' + if s[:2] == 'yz': + return -666 m -= 1 x += 3 return x From arigo at codespeak.net Sun Sep 26 17:45:47 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 17:45:47 +0200 (CEST) Subject: [pypy-svn] r77383 - pypy/branch/jit-str/pypy/jit/codewriter Message-ID: <20100926154547.9D508282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 17:45:46 2010 New Revision: 77383 Modified: pypy/branch/jit-str/pypy/jit/codewriter/assembler.py pypy/branch/jit-str/pypy/jit/codewriter/codewriter.py Log: Fix for metainterp/test/test_ztranslation.py. Actually fixes the fact that the log printer did not know about the extra helpers introduced by jtransform (the variants of string equality). Modified: pypy/branch/jit-str/pypy/jit/codewriter/assembler.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/assembler.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/assembler.py Sun Sep 26 17:45:46 2010 @@ -232,3 +232,11 @@ return addr = llmemory.cast_ptr_to_adr(value) self.list_of_addr2name.append((addr, name)) + + def finished(self): + # Helper called at the end of assembling. Registers the extra + # functions shown in _callinfo_for_oopspec. 
+ from pypy.jit.codewriter.effectinfo import _callinfo_for_oopspec + for _, func in _callinfo_for_oopspec.values(): + func = heaptracker.int2adr(func) + self.see_raw_object(func.ptr) Modified: pypy/branch/jit-str/pypy/jit/codewriter/codewriter.py ============================================================================== --- pypy/branch/jit-str/pypy/jit/codewriter/codewriter.py (original) +++ pypy/branch/jit-str/pypy/jit/codewriter/codewriter.py Sun Sep 26 17:45:46 2010 @@ -73,6 +73,7 @@ count += 1 if not count % 500: log.info("Produced %d jitcodes" % count) + self.assembler.finished() heaptracker.finish_registering(self.cpu) log.info("there are %d JitCode instances." % count) From arigo at codespeak.net Sun Sep 26 18:05:31 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 18:05:31 +0200 (CEST) Subject: [pypy-svn] r77384 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100926160531.776CD282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 18:05:29 2010 New Revision: 77384 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Log: Add a PYPY_GC_MIN env var. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Sun Sep 26 18:05:29 2010 @@ -93,7 +93,8 @@ # PYPY_GC_NURSERY and fall back to half the size of # the L2 cache. For 'major_collection_threshold' it will look # it up in the env var PYPY_GC_MAJOR_COLLECT. It also sets - # 'max_heap_size' to PYPY_GC_MAX. + # 'max_heap_size' to PYPY_GC_MAX. Finally, PYPY_GC_MIN sets + # the minimal value of 'next_major_collection_threshold'. "read_from_env": True, # The size of the nursery. Note that this is only used as a @@ -156,6 +157,7 @@ self.small_request_threshold = small_request_threshold self.major_collection_threshold = major_collection_threshold self.num_major_collects = 0 + self.min_heap_size = 0.0 self.max_heap_size = 0.0 self.max_heap_size_already_raised = False # @@ -251,6 +253,9 @@ if major_coll >= 1.0: self.major_collection_threshold = major_coll # + min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') + self.min_heap_size = float(min_heap_size) + # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) @@ -276,13 +281,17 @@ self.nursery_free = self.nursery # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size - # initialize the threshold, a bit arbitrarily - self.set_major_threshold_from(self.nursery_size * - self.major_collection_threshold) + # initialize the threshold + self.min_heap_size = max(self.min_heap_size, self.nursery_size * + self.major_collection_threshold) + self.set_major_threshold_from(0.0) debug_stop("gc-set-nursery-size") def set_major_threshold_from(self, threshold): # Set the next_major_collection_threshold. 
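(A rough sketch, for illustration only, of the clamping that this change adds
around the major-collection threshold; the helper below is made up, the real
logic lives in set_major_threshold_from() in the diff that follows. The
threshold is kept at or above the PYPY_GC_MIN value and at or below
PYPY_GC_MAX.)

    def clamp_threshold(threshold, min_heap_size, max_heap_size):
        # raise the threshold to the configured minimum
        if threshold < min_heap_size:
            threshold = min_heap_size
        # cap it at the configured maximum, if there is one
        bounded = max_heap_size > 0.0 and threshold > max_heap_size
        if bounded:
            threshold = max_heap_size
        return threshold, bounded

    # e.g. with PYPY_GC_MIN=32MB and no PYPY_GC_MAX:
    # clamp_threshold(10e6, 32e6, 0.0) -> (32000000.0, False)
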
+ if threshold < self.min_heap_size: + threshold = self.min_heap_size + # if self.max_heap_size > 0.0 and threshold > self.max_heap_size: threshold = self.max_heap_size bounded = True From cfbolz at codespeak.net Sun Sep 26 18:07:01 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sun, 26 Sep 2010 18:07:01 +0200 (CEST) Subject: [pypy-svn] r77385 - pypy/extradoc/talk/pepm2011 Message-ID: <20100926160701.D1940282BAD@codespeak.net> Author: cfbolz Date: Sun Sep 26 18:07:00 2010 New Revision: 77385 Modified: pypy/extradoc/talk/pepm2011/paper.bib (contents, props changed) pypy/extradoc/talk/pepm2011/paper.tex Log: whack at the introduction Modified: pypy/extradoc/talk/pepm2011/paper.bib ============================================================================== Binary files. No diff available. Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Sun Sep 26 18:07:00 2010 @@ -100,28 +100,55 @@ overhead of the interpreter's data structures, such as operand stack etc. The second important problem that any JIT for a dynamic language needs to solve is how to deal with the overhead of boxing of primitive types and of type -dispatching. Those are problems that are usually not present in statically typed -languages. +dispatching. Those are problems that are usually not present or at least less +severe in statically typed languages. Boxing of primitive types means that dynamic languages need to be able to handle -all objects, even integers, floats, etc. in the same way as user-defined +all objects, even integers, floats, bools etc. in the same way as user-defined instances. Thus those primitive types are usually \emph{boxed}, i.e. a small -heap-structure is allocated for them, that contains the actual value. +heap-structure is allocated for them, that contains the actual value. Boxing +primitive types can be very costly, because XXX Type dispatching is the process of finding the concrete implementation that is applicable to the objects at hand when doing a generic operation on them. An example would be the addition of two objects: The addition needs to check what the concrete objects that should be added are, and choose the implementation -that is fitting for them. +that is fitting for them. Type dispatching is a very common operation in a +dynamic language because no types are known at compile time, so all operations +need it. + +A recently popular approach to implementing just-in-time compilers for dynamic +languages is that of a tracing JIT. A tracing JIT often takes the form of an +extension to an existing interpreter, which can be sped up that way. The PyPy +project is an environment for implementing dynamic programming languages. It's +approach to doing so is to straightforwardly implement an interpreter for the +to-be-implemented language, and then use powerful tools to turn the interpreter +into an efficient VM that also contains a just-in-time compiler. This compiler +is automatically generated from the interpreter using partial-evaluation-like +techniques \cite{bolz_tracing_2009}. The PyPy project and its approach to +tracing JIT compilers is described in Section~\ref{sec:Background} -Last year, we wrote a paper \cite{XXX} about how PyPy's meta-JIT -approach works. 
These explain how the meta-tracing JIT can remove the overhead +The tracing JIT approach that the PyPy project is taking removes the overhead of bytecode dispatch. In this paper we want to explain how the traces that are -produced by our meta-tracing JIT are then optimized to also remove some of the +produced by our meta-tracing JIT can be optimized to also remove some of the overhead more closely associated to dynamic languages, such as boxing overhead -and type dispatching. The most important technique to achieve this is a form of -escape analysis \cite{XXX} that we call \emph{virtual objects}. This is best -explained via an example. +and type dispatching. To understand the problem more closely, we analyze the +occurring object lifetimes in Section~\ref{sec:lifetimes}. The most important +technique to achieve this is a form of escape analysis \cite{XXX} that we call +\emph{virtual objects}, which is described in Section~\ref{sec:virtuals}. The +basic approach of virtual objects can then be extended to also be used for +type-specializing the traces that are produced by the tracing JIT +(Section~\ref{sec:crossloop}). In Section~\ref{sec:XXX} we describe some +supporting techniques that are not central to the approach, but are needed to +improve the results. The introduced techniques are evaluated in +Section~\ref{sec:Evaluation}. + +The contributions of this paper are: + +\begin{enumerate} + \item An efficient and effective escape analysis for a tracing JIT + \item XXX +\end{enumerate} \section{Background} \label{sec:Background} @@ -184,7 +211,7 @@ 3) or escape (category 4). \section{Escape Analysis in a Tracing JIT} -\label{sec:Escape Analysis in a Tracing JIT} +\label{sec:virtuals} \subsection{Running Example} From arigo at codespeak.net Sun Sep 26 18:10:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 18:10:04 +0200 (CEST) Subject: [pypy-svn] r77386 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100926161004.2228F282BAD@codespeak.net> Author: arigo Date: Sun Sep 26 18:10:02 2010 New Revision: 77386 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Log: Change the default: now PYPY_GC_MIN defaults to 8 times the nursery size, which is good to avoid spending all our time doing small major collections if the program is not allocating much at all. 
Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Sun Sep 26 18:10:02 2010 @@ -248,13 +248,18 @@ newsize = generation.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize + newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll >= 1.0: self.major_collection_threshold = major_coll # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') - self.min_heap_size = float(min_heap_size) + if min_heap_size > 0: + self.min_heap_size = float(min_heap_size) + else: + # defaults to 8 times the nursery + self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: @@ -262,7 +267,7 @@ # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) - self.nursery_size = max(newsize, minsize) + self.nursery_size = newsize self.allocate_nursery() From cfbolz at codespeak.net Sun Sep 26 18:19:18 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sun, 26 Sep 2010 18:19:18 +0200 (CEST) Subject: [pypy-svn] r77387 - pypy/extradoc/talk/pepm2011 Message-ID: <20100926161918.6CE12282BAD@codespeak.net> Author: cfbolz Date: Sun Sep 26 18:19:16 2010 New Revision: 77387 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: fix an XXX, add a thought Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Sun Sep 26 18:19:16 2010 @@ -107,7 +107,10 @@ all objects, even integers, floats, bools etc. in the same way as user-defined instances. Thus those primitive types are usually \emph{boxed}, i.e. a small heap-structure is allocated for them, that contains the actual value. Boxing -primitive types can be very costly, because XXX +primitive types can be very costly, because a lot of common operations, +particularly all arithmetic operations, have to produce a new box, in addition +to the actual computation they do. Because the boxes are allocated on the heap, +producing a lot of them puts pressure on the garbage collector. Type dispatching is the process of finding the concrete implementation that is applicable to the objects at hand when doing a generic operation on them. An @@ -156,7 +159,6 @@ \subsection{Tracing JIT Compilers} \label{sub:JIT_background} -XXX object model and its reflection in traces (e.g. guard\_class before each method call) traces and bridges @@ -164,6 +166,8 @@ getting from the interpreter to traces +XXX object model and its reflection in traces (e.g. 
guard\_class before each method call) + \subsection{PyPy} \label{sub:PyPy} @@ -601,6 +605,7 @@ \includegraphics{figures/step4.pdf} \end{figure} +XXX optimization particularly effective for chains of operations %___________________________________________________________________________ From arigo at codespeak.net Sun Sep 26 19:15:30 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Sun, 26 Sep 2010 19:15:30 +0200 (CEST) Subject: [pypy-svn] r77388 - pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc Message-ID: <20100926171530.E8A1A282BF8@codespeak.net> Author: arigo Date: Sun Sep 26 19:15:29 2010 New Revision: 77388 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Log: Try harder to keep the value of 'rawmalloc_total_size' correct. Done by always adding/subtracting from it a value that is a multiple of WORD. Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/minimark.py Sun Sep 26 19:15:29 2010 @@ -509,7 +509,16 @@ cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS # - allocsize = cardheadersize + raw_malloc_usage(totalsize) + # Detect very rare cases of overflows + if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1) + - cardheadersize): + raise MemoryError("rare case of overflow") + # + # Now we know that the following computations cannot overflow. + # Note that round_up_for_allocation() is also needed to get the + # correct number added to 'rawmalloced_total_size'. + allocsize = (cardheadersize + raw_malloc_usage( + llarena.round_up_for_allocation(totalsize))) # # Allocate the object using arena_malloc(), which we assume here # is just the same as raw_malloc(), but allows the extra @@ -534,8 +543,8 @@ result = arena + cardheadersize llarena.arena_reserve(result, totalsize) # - # Record the newly allocated object and its size. - self.rawmalloced_total_size += raw_malloc_usage(totalsize) + # Record the newly allocated object and its full malloced size. + self.rawmalloced_total_size += allocsize self.rawmalloced_objects.append(result + size_gc_header) # # Common code to fill the header and length of the object. @@ -1064,6 +1073,10 @@ _malloc_out_of_nursery._always_inline_ = True def _malloc_out_of_nursery_nonsmall(self, totalsize): + # 'totalsize' should be aligned. 
+ ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0, + "misaligned totalsize in _malloc_out_of_nursery_nonsmall") + # arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False) if not arena: raise MemoryError("cannot allocate object") @@ -1195,8 +1208,7 @@ self.rawmalloced_objects.append(obj) else: totalsize = size_gc_header + self.get_size(obj) - rawtotalsize = raw_malloc_usage(totalsize) - self.rawmalloced_total_size -= rawtotalsize + allocsize = raw_malloc_usage(totalsize) arena = llarena.getfakearenaaddress(obj - size_gc_header) # # Must also include the card marker area, if any @@ -1211,8 +1223,10 @@ length = (obj + offset_to_length).signed[0] extra_words = self.card_marking_words_for_length(length) arena -= extra_words * WORD + allocsize += extra_words * WORD # llarena.arena_free(arena) + self.rawmalloced_total_size -= allocsize # list.delete() From cfbolz at codespeak.net Sun Sep 26 20:40:55 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sun, 26 Sep 2010 20:40:55 +0200 (CEST) Subject: [pypy-svn] r77389 - pypy/extradoc/talk/pepm2011 Message-ID: <20100926184055.AE9BC282BAD@codespeak.net> Author: cfbolz Date: Sun Sep 26 20:40:53 2010 New Revision: 77389 Modified: pypy/extradoc/talk/pepm2011/paper.bib pypy/extradoc/talk/pepm2011/paper.tex Log: reorder some stuff, write two paragraphs about pypy Modified: pypy/extradoc/talk/pepm2011/paper.bib ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.bib (original) +++ pypy/extradoc/talk/pepm2011/paper.bib Sun Sep 26 20:40:53 2010 @@ -53,6 +53,22 @@ howpublished = {{https://developer.mozilla.org/en/SpiderMonkey/Internals/Property\_cache}} }, + at inproceedings{davide_ancona_rpython:_2007, + address = {Montreal, Quebec, Canada}, + title = {{RPython:} a step towards reconciling dynamically and statically typed {OO} languages}, + isbn = {978-1-59593-868-8}, + shorttitle = {{RPython}}, + url = {http://portal.acm.org/citation.cfm?id=1297091}, + doi = {10.1145/1297081.1297091}, + abstract = {Although the C-based interpreter of Python is reasonably fast, implementations on the {CLI} or the {JVM} platforms offers some advantages in terms of robustness and interoperability. Unfortunately, because the {CLI} and {JVM} are primarily designed to execute statically typed, object-oriented languages, most dynamic language implementations cannot use the native bytecodes for common operations like method calls and exception handling; as a result, they are not able to take full advantage of the power offered by the {CLI} and {JVM.}}, + booktitle = {Proceedings of the 2007 symposium on Dynamic languages}, + publisher = {{ACM}}, + author = {Davide Ancona and Massimo Ancona and Antonio Cuni and Nicholas D. Matsakis}, + year = {2007}, + keywords = {{JVM,} .net, Python}, + pages = {53--64} +}, + @inproceedings{armin_rigo_pypys_2006, address = {Portland, Oregon, {USA}}, title = {{PyPy's} approach to virtual machine construction}, Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Sun Sep 26 20:40:53 2010 @@ -133,29 +133,55 @@ The tracing JIT approach that the PyPy project is taking removes the overhead of bytecode dispatch. 
In this paper we want to explain how the traces that are -produced by our meta-tracing JIT can be optimized to also remove some of the +produced by PyPy's tracing JIT can be optimized to also remove some of the overhead more closely associated to dynamic languages, such as boxing overhead and type dispatching. To understand the problem more closely, we analyze the occurring object lifetimes in Section~\ref{sec:lifetimes}. The most important technique to achieve this is a form of escape analysis \cite{XXX} that we call \emph{virtual objects}, which is described in Section~\ref{sec:virtuals}. The -basic approach of virtual objects can then be extended to also be used for +goal of virtual objects is to remove allocations of temporary objects that have +a predictable lifetime and to optimize type dispatching in the process. + +The basic approach of virtual objects can then be extended to also be used for type-specializing the traces that are produced by the tracing JIT (Section~\ref{sec:crossloop}). In Section~\ref{sec:XXX} we describe some supporting techniques that are not central to the approach, but are needed to improve the results. The introduced techniques are evaluated in -Section~\ref{sec:Evaluation}. +Section~\ref{sec:Evaluation} using PyPy's Python interpreter as a case study. The contributions of this paper are: \begin{enumerate} - \item An efficient and effective escape analysis for a tracing JIT + \item An efficient and effective algorithm for removing objects allocations in a tracing JIT. \item XXX \end{enumerate} \section{Background} \label{sec:Background} +\subsection{PyPy} +\label{sub:PyPy} + +The work described in this paper was done in the context of the PyPy project +\cite{armin_rigo_pypys_2006}. PyPy is an environment where dynamic languages can +be implemented in a simple yet efficient way. The approach taken when +implementing a language with PyPy is to write an interpreter for the language in +\emph{RPython} \cite{davide_ancona_rpython:_2007}. RPython ("restricted Python") +is a subset of Python chosen in such a way, that type inference becomes +possible. The language interpreter can thus be translated with the help of +PyPy's tools into a VM on the C level. Because the interpreter is written at a +relatively high level, the language implementation is kept free of low-level +details, such as object layout, garbage collection or memory model. Those +aspects of the final VM are woven into the generated code during the translation +to C. + +The feature that makes PyPy more than a compiler with a runtime system is it's +support for automated JIT compiler generation \cite{bolz_tracing_2009}. During +the translation to C, PyPy's tools can generate a just-in-time compiler for the +language that the interpreter is implementing. This process is not fully +automatic, but needs to be guided by the language implementer by some +source-code hints. + \subsection{Tracing JIT Compilers} \label{sub:JIT_background} @@ -168,55 +194,6 @@ XXX object model and its reflection in traces (e.g. guard\_class before each method call) -\subsection{PyPy} -\label{sub:PyPy} - -\section{Object Lifetimes in a Tracing JIT} -\label{sec:lifetimes} - -% section Object Lifetimes in a Tracing JIT (end) - -To understand the problems that this paper is trying to solve some more, we -first need to understand various cases of object lifetimes that can occur in a -tracing JIT compiler. 
- -\begin{figure} -\includegraphics{figures/obj-lifetime.pdf} - -\caption{Object Lifetimes in a Trace} -\label{fig:lifetimes} -\end{figure} - -The figure shows a trace before optimization, together with the lifetime of -various kinds of objects created in the trace. It is executed from top to -bottom. At the bottom, a jump is used to execute the same loop another time. -For clarity, the figure shows two iterations of the loop. -The loop is executed until one of the guards in the trace fails, and the -execution is aborted. - -Some of the operations within this trace are \texttt{new} operations, which each create a -new instance of some class. These instances are used for a while, e.g. by -calling methods on them, reading and writing their fields. Some of these -instances escape, which means that they are stored in some globally accessible -place or are passed into a function. - -Together with the \texttt{new} operations, the figure shows the lifetimes of the -created objects. Objects in category 1 live for a while, and are then just not -used any more. The creation of these objects is removed by the -optimization described in the last section. - -Objects in category 2 live for a while and then escape. The optimization of the -last section deals with them too: the \texttt{new} that creates them and -the field accesses are deferred, until the point where the object escapes. - -The objects in category 3 and 4 are in principle like the objects in category 1 -and 2. They are created, live for a while, but are then passed as an argument -to the \texttt{jump} operation. In the next iteration they can either die (category -3) or escape (category 4). - -\section{Escape Analysis in a Tracing JIT} -\label{sec:virtuals} - \subsection{Running Example} For the purpose of this paper, we are going to use a very simple object @@ -368,6 +345,53 @@ a bit later. In the next section, we will see how this can be improved upon, using escape analysis. +\section{Object Lifetimes in a Tracing JIT} +\label{sec:lifetimes} + +% section Object Lifetimes in a Tracing JIT (end) + +To understand the problems that this paper is trying to solve some more, we +first need to understand various cases of object lifetimes that can occur in a +tracing JIT compiler. + +\begin{figure} +\includegraphics{figures/obj-lifetime.pdf} + +\caption{Object Lifetimes in a Trace} +\label{fig:lifetimes} +\end{figure} + +The figure shows a trace before optimization, together with the lifetime of +various kinds of objects created in the trace. It is executed from top to +bottom. At the bottom, a jump is used to execute the same loop another time. +For clarity, the figure shows two iterations of the loop. +The loop is executed until one of the guards in the trace fails, and the +execution is aborted. + +Some of the operations within this trace are \texttt{new} operations, which each create a +new instance of some class. These instances are used for a while, e.g. by +calling methods on them, reading and writing their fields. Some of these +instances escape, which means that they are stored in some globally accessible +place or are passed into a function. + +Together with the \texttt{new} operations, the figure shows the lifetimes of the +created objects. Objects in category 1 live for a while, and are then just not +used any more. The creation of these objects is removed by the +optimization described in the last section. + +Objects in category 2 live for a while and then escape. 
The optimization of the +last section deals with them too: the \texttt{new} that creates them and +the field accesses are deferred, until the point where the object escapes. + +The objects in category 3 and 4 are in principle like the objects in category 1 +and 2. They are created, live for a while, but are then passed as an argument +to the \texttt{jump} operation. In the next iteration they can either die (category +3) or escape (category 4). + +\section{Escape Analysis in a Tracing JIT} +\label{sec:virtuals} + + \subsection{Virtual Objects} The main insight to improve the code shown in the last section is that some of From afa at codespeak.net Sun Sep 26 21:51:51 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 21:51:51 +0200 (CEST) Subject: [pypy-svn] r77390 - pypy/branch/fast-forward/lib-python/modified-2.7.0 Message-ID: <20100926195151.21618282BAD@codespeak.net> Author: afa Date: Sun Sep 26 21:51:48 2010 New Revision: 77390 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/inspect.py - copied, changed from r77358, pypy/branch/fast-forward/lib-python/2.7.0/inspect.py Log: PyPy builtin functions have a func_code with co_argcount and co_varnames. Let inspect.getargs work with them. (same diffs as with 2.5.2) Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/inspect.py (from r77358, pypy/branch/fast-forward/lib-python/2.7.0/inspect.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/inspect.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/inspect.py Sun Sep 26 21:51:48 2010 @@ -746,8 +746,15 @@ 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" if not iscode(co): - raise TypeError('{!r} is not a code object'.format(co)) + if hasattr(len, 'func_code') and type(co) is type(len.func_code): + # PyPy extension: built-in function objects have a func_code too. + # There is no co_code on it, but co_argcount and co_varnames and + # co_flags are present. 
+ pass + else: + raise TypeError('{!r} is not a code object'.format(co)) + code = getattr(co, 'co_code', '') nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) @@ -757,12 +764,12 @@ for i in range(nargs): if args[i][:1] in ('', '.'): stack, remain, count = [], [], [] - while step < len(co.co_code): - op = ord(co.co_code[step]) + while step < len(code): + op = ord(code[step]) step = step + 1 if op >= dis.HAVE_ARGUMENT: opname = dis.opname[op] - value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 + value = ord(code[step]) + ord(code[step+1])*256 step = step + 2 if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): remain.append(value) @@ -809,7 +816,9 @@ if ismethod(func): func = func.im_func - if not isfunction(func): + if not (isfunction(func) or + isbuiltin(func) and hasattr(func, 'func_code')): + # PyPy extension: this works for built-in functions too raise TypeError('{!r} is not a Python function'.format(func)) args, varargs, varkw = getargs(func.func_code) return ArgSpec(args, varargs, varkw, func.func_defaults) From afa at codespeak.net Sun Sep 26 22:13:36 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 22:13:36 +0200 (CEST) Subject: [pypy-svn] r77391 - pypy/branch/fast-forward/lib-python Message-ID: <20100926201336.768BF282BFA@codespeak.net> Author: afa Date: Sun Sep 26 22:13:34 2010 New Revision: 77391 Modified: pypy/branch/fast-forward/lib-python/TODO Log: Update TODO list for 2.7 Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Sun Sep 26 22:13:34 2010 @@ -1,6 +1,9 @@ TODO list for 2.7.0 =================== +Probably easy tasks +------------------- + - Missing builtin: bytes = str - Missing builtin: next = space.next(w_obj) @@ -13,8 +16,27 @@ assert eval('a', None, dict(a=42)) == 42 +- Missing (float|int).(imag|real) + +- Missing complex.__trunc__ + +- Missing thread._count() + +- Mark some tests as "implementation specific":: + + @test_support.cpython_only + +Longer tasks +------------ + - Implement the _io module. At least _io.FileIO, and have io.py import everything from _pyio.py - Finish _multiprocessing +More difficult issues +--------------------- + +- In socket.py, """The implementation currently relies on reference counting to + close the underlying socket object.""" + From afa at codespeak.net Sun Sep 26 22:22:39 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 22:22:39 +0200 (CEST) Subject: [pypy-svn] r77392 - pypy/branch/fast-forward/pypy Message-ID: <20100926202239.D42CB282BFA@codespeak.net> Author: afa Date: Sun Sep 26 22:22:38 2010 New Revision: 77392 Modified: pypy/branch/fast-forward/pypy/conftest.py Log: Fill the TinyObjSpace with a few builtin types. This fixes AppDirect tests in test_newformat.py. 
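To see why the stand-in space needs these attributes, consider how an app-level test refers to wrapped builtin types when run with --appdirect. The following is a hypothetical illustration only (class and variable names are invented, this is not code from the patch):

class FakeTinyObjSpace(object):
    def __init__(self):
        # same idea as the conftest change below: expose the host
        # interpreter's builtin types under the usual w_* names
        for name in ('int', 'long', 'str', 'unicode'):
            setattr(self, 'w_' + name, eval(name))

space = FakeTinyObjSpace()
assert space.w_int is int          # tests can now spell space.w_int
assert space.w_unicode is unicode
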
Modified: pypy/branch/fast-forward/pypy/conftest.py ============================================================================== --- pypy/branch/fast-forward/pypy/conftest.py (original) +++ pypy/branch/fast-forward/pypy/conftest.py Sun Sep 26 22:22:38 2010 @@ -132,6 +132,10 @@ py.test.skip("cannot runappdirect test: space needs %s = %s, "\ "while pypy-c was built with %s" % (key, value, has)) + for name in ('int', 'long', 'str', 'unicode'): + setattr(self, 'w_' + name, eval(name)) + + def appexec(self, args, body): body = body.lstrip() assert body.startswith('(') From afa at codespeak.net Sun Sep 26 22:41:46 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 22:41:46 +0200 (CEST) Subject: [pypy-svn] r77393 - in pypy/branch/fast-forward: lib-python pypy/module/__builtin__ pypy/module/__builtin__/test Message-ID: <20100926204146.5E210282BAD@codespeak.net> Author: afa Date: Sun Sep 26 22:41:44 2010 New Revision: 77393 Modified: pypy/branch/fast-forward/lib-python/TODO pypy/branch/fast-forward/pypy/module/__builtin__/__init__.py pypy/branch/fast-forward/pypy/module/__builtin__/operation.py pypy/branch/fast-forward/pypy/module/__builtin__/test/test_builtin.py Log: Implement missing __builtin__.next() Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Sun Sep 26 22:41:44 2010 @@ -6,8 +6,6 @@ - Missing builtin: bytes = str -- Missing builtin: next = space.next(w_obj) - - Missing builtin: bytearray (possibly reuse module.__pypy__.bytebuffer) - Octal literals: 0o777 Modified: pypy/branch/fast-forward/pypy/module/__builtin__/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/__init__.py Sun Sep 26 22:41:44 2010 @@ -84,6 +84,7 @@ 'delattr' : 'operation.delattr', 'hasattr' : 'operation.hasattr', 'iter' : 'operation.iter', + 'next' : 'operation.next', 'id' : 'operation.id', 'intern' : 'operation.intern', 'callable' : 'operation.callable', Modified: pypy/branch/fast-forward/pypy/module/__builtin__/operation.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/operation.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/operation.py Sun Sep 26 22:41:44 2010 @@ -185,6 +185,17 @@ else: return iter_sentinel(space, w_collection_or_callable, w_sentinel) +def next(space, w_iterator, w_default=NoneNotWrapped): + """next(iterator[, default]) +Return the next item from the iterator. 
If default is given and the iterator +is exhausted, it is returned instead of raising StopIteration.""" + try: + return space.next(w_iterator) + except OperationError, e: + if w_default is not None and e.match(space, space.w_StopIteration): + return w_default + raise + def ord(space, w_val): """Return the integer ordinal of a character.""" return space.ord(w_val) Modified: pypy/branch/fast-forward/pypy/module/__builtin__/test/test_builtin.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/__builtin__/test/test_builtin.py (original) +++ pypy/branch/fast-forward/pypy/module/__builtin__/test/test_builtin.py Sun Sep 26 22:41:44 2010 @@ -196,6 +196,26 @@ enum = enumerate(range(5), 2) assert list(enum) == zip(range(2, 7), range(5)) + def test_next(self): + x = iter(['a', 'b', 'c']) + assert next(x) == 'a' + assert next(x) == 'b' + assert next(x) == 'c' + raises(StopIteration, next, x) + assert next(x, 42) == 42 + + def test_next__next__(self): + class Counter: + def __init__(self): + self.count = 0 + def next(self): + self.count += 1 + return self.count + x = Counter() + assert next(x) == 1 + assert next(x) == 2 + assert next(x) == 3 + def test_xrange_args(self): ## # xrange() attributes are deprecated and were removed in Python 2.3. ## x = xrange(2) From cfbolz at codespeak.net Sun Sep 26 22:43:32 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sun, 26 Sep 2010 22:43:32 +0200 (CEST) Subject: [pypy-svn] r77394 - pypy/extradoc/talk/pepm2011 Message-ID: <20100926204332.9F2A0282BAD@codespeak.net> Author: cfbolz Date: Sun Sep 26 22:43:30 2010 New Revision: 77394 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: more in the brackground section about tracing JITs Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Sun Sep 26 22:43:30 2010 @@ -54,9 +54,9 @@ \title{Escape Analysis and Specialization in a Tracing JIT} -\authorinfo{Carl Friedrich Bolz \and Armin Rigo \and Antion Cuni \and Maciek Fija?kowski} - {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany} - {cfbolz at gmx.de} +\authorinfo{Carl Friedrich Bolz \and Antion Cuni \and Maciek Fija?kowski \and Samuele Pedroni \and Armin Rigo} + {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany XXX} + {cfbolz at gmx.de XXX} %\numberofauthors{3} %\author{ @@ -129,7 +129,7 @@ into an efficient VM that also contains a just-in-time compiler. This compiler is automatically generated from the interpreter using partial-evaluation-like techniques \cite{bolz_tracing_2009}. The PyPy project and its approach to -tracing JIT compilers is described in Section~\ref{sec:Background} +tracing JIT compilers is described in Section~\ref{sec:Background}. The tracing JIT approach that the PyPy project is taking removes the overhead of bytecode dispatch. In this paper we want to explain how the traces that are @@ -173,20 +173,65 @@ relatively high level, the language implementation is kept free of low-level details, such as object layout, garbage collection or memory model. Those aspects of the final VM are woven into the generated code during the translation -to C. +to C. XXX languages that are done using PyPy The feature that makes PyPy more than a compiler with a runtime system is it's support for automated JIT compiler generation \cite{bolz_tracing_2009}. 
During the translation to C, PyPy's tools can generate a just-in-time compiler for the language that the interpreter is implementing. This process is not fully automatic, but needs to be guided by the language implementer by some -source-code hints. +source-code hints. Semi-automatically generating a JIT compiler has many advantages +over writing one manually, which is an error-prone and tedious process. The +generated JIT has the same semantics as the interpreter by construction, and all +languages implemented using PyPy benefit from improvements to the JIT generator. +The JIT that is produced by PyPy's JIT generator is a \emph{tracing JIT +compiler}, a concept which will be explained in more detail in the next section. \subsection{Tracing JIT Compilers} \label{sub:JIT_background} +Tracing JITs are a recently more popular approach to write just-in-time +compilers for dynamic languages \cite{XXX}. Their origins lie in the Dynamo +project, that used a tracing approach to optimize machine code using execution +traces \cite{XXX}. They were then adapted to be used for a very light-weight +Java VM \cite{XXX} and afterwards used in several implementations of dynamic +languages, such as JavaScript \cite{XXX}, Lua \cite{XXX} and now Python via +PyPy. + +The core idea of tracing JITs is to focus the optimization effort of the JIT +compiler on the hot paths of the core loops of the program and to just use an +interpreter for the less commonly executed parts. VMs that use a tracing JIT are +thus mixed-mode execution environments, they contain both an interpreter and a +JIT compiler. By default the interpreter is used to execute the program, doing +some light-weight profiling at the same time. This profiling is used to identify +the hot loops of the program. If a hot loop is found in that way, the +interpreter enters a special \emph{tracing mode}. In this tracing mode, the +interpreter records all operations that it is executing while running one +iteration of the hot loop. This history of executed operations of one loop is +called a \emph{trace}. Because the trace corresponds to one iteration of a loop, +it always ends with a jump to its own beginning. + +This trace of operations is then the basis of the generated code. The trace is +optimized in some ways, and then turned into machine code. Generating machine +code is simple, because the traces are linear and the operations are very close +to machine level. The trace corresponds to one concrete execution of a loop, +therefore the code generated from it is only one possible path through the loop. +To make sure that the trace is maintaining the correct semantics, it contains a +\emph{guard} at all places where the execution could have diverged from the +path. Those guards check the assumptions under which execution can stay on the +trace. As an example, if a loop contains an \texttt{if} statement, the trace +will contain the execution of one of the paths only, which is the path that was +taken during the production of the trace. The trace will also contain a guard +that checks that the condition of the \texttt{if} statement is true, because if +it isn't, the rest of the trace is not valid. + +When generating machine code, every guard is be turned into a quick check to +see whether the assumption still holds. When such a guard is hit during the +execution of the machine code and the assumption does not hold, the execution of +the machine code is stopped, and interpreter continues to run from that point +on. 
These guards are the only mechanism to stop the execution of a trace, the +loop end condition also takes the form of a guard. -traces and bridges arguments to traces From cfbolz at codespeak.net Sun Sep 26 22:49:38 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Sun, 26 Sep 2010 22:49:38 +0200 (CEST) Subject: [pypy-svn] r77395 - pypy/extradoc/talk/pepm2011 Message-ID: <20100926204938.DBF42282BAD@codespeak.net> Author: cfbolz Date: Sun Sep 26 22:49:37 2010 New Revision: 77395 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: whoops Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Sun Sep 26 22:49:37 2010 @@ -54,7 +54,7 @@ \title{Escape Analysis and Specialization in a Tracing JIT} -\authorinfo{Carl Friedrich Bolz \and Antion Cuni \and Maciek Fija?kowski \and Samuele Pedroni \and Armin Rigo} +\authorinfo{Carl Friedrich Bolz \and Antonio Cuni \and Maciek Fija?kowski \and Samuele Pedroni \and Armin Rigo} {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany XXX} {cfbolz at gmx.de XXX} From afa at codespeak.net Sun Sep 26 23:22:47 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 23:22:47 +0200 (CEST) Subject: [pypy-svn] r77396 - in pypy/branch/fast-forward: lib-python pypy/module/_ast/test Message-ID: <20100926212247.8D91C282BAD@codespeak.net> Author: afa Date: Sun Sep 26 23:22:46 2010 New Revision: 77396 Modified: pypy/branch/fast-forward/lib-python/TODO pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py Log: TODO: AST objects must be picklable Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Sun Sep 26 23:22:46 2010 @@ -24,6 +24,12 @@ @test_support.cpython_only +Medium tasks +------------ + +- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: + test_pickle() + Longer tasks ------------ Modified: pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py (original) +++ pypy/branch/fast-forward/pypy/module/_ast/test/test_ast.py Sun Sep 26 23:22:46 2010 @@ -168,9 +168,22 @@ def test_future(self): mod = self.get_ast("from __future__ import with_statement") compile(mod, "", "exec") - mod = self.get_ast(""""I'm a docstring."\n + mod = self.get_ast(""""I am a docstring."\n from __future__ import generators""") compile(mod, "", "exec") mod = self.get_ast("from __future__ import with_statement; import y; " \ "from __future__ import nested_scopes") raises(SyntaxError, compile, mod, "", "exec") + + def test_pickle(self): + skip("XXX implement me") + import pickle + mod = self.get_ast("if y: x = 4") + co = compile(mod, "", "exec") + + s = pickle.dumps(mod) + mod2 = pickle.loads(s) + ns = {"y" : 1} + co2 = compile(mod2, "", "exec") + exec co2 in ns + assert ns["x"] == 4 From afa at codespeak.net Sun Sep 26 23:53:30 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Sun, 26 Sep 2010 23:53:30 +0200 (CEST) Subject: [pypy-svn] r77397 - in pypy/branch/fast-forward: lib-python pypy/module/thread pypy/module/thread/test Message-ID: <20100926215330.37A37282BAD@codespeak.net> Author: afa Date: Sun Sep 26 23:53:28 2010 New 
Revision: 77397 Modified: pypy/branch/fast-forward/lib-python/TODO pypy/branch/fast-forward/pypy/module/thread/__init__.py pypy/branch/fast-forward/pypy/module/thread/os_thread.py pypy/branch/fast-forward/pypy/module/thread/test/test_thread.py Log: Implement thread._count(), needed by many tests. Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Sun Sep 26 23:53:28 2010 @@ -18,8 +18,6 @@ - Missing complex.__trunc__ -- Missing thread._count() - - Mark some tests as "implementation specific":: @test_support.cpython_only Modified: pypy/branch/fast-forward/pypy/module/thread/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/thread/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/thread/__init__.py Sun Sep 26 23:53:28 2010 @@ -14,6 +14,7 @@ 'start_new': 'os_thread.start_new_thread', # obsolete syn. 'get_ident': 'os_thread.get_ident', 'stack_size': 'os_thread.stack_size', + '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym 'LockType': 'os_lock.getlocktype(space)', Modified: pypy/branch/fast-forward/pypy/module/thread/os_thread.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/thread/os_thread.py (original) +++ pypy/branch/fast-forward/pypy/module/thread/os_thread.py Sun Sep 26 23:53:28 2010 @@ -59,6 +59,8 @@ # theoretically nicer, but comes with messy memory management issues. # This is much more straightforward. + nbthreads = 0 + # The following lock is held whenever the fields # 'bootstrapper.w_callable' and 'bootstrapper.args' are in use. lock = None @@ -80,12 +82,14 @@ space = bootstrapper.space w_callable = bootstrapper.w_callable args = bootstrapper.args + bootstrapper.nbthreads += 1 bootstrapper.release() # run! space.threadlocals.enter_thread(space) try: bootstrapper.run(space, w_callable, args) finally: + bootstrapper.nbthreads -= 1 # clean up space.threadlocals to remove the ExecutionContext # entry corresponding to the current thread try: @@ -206,3 +210,15 @@ raise wrap_thread_error(space, "setting stack size not supported") return space.wrap(old_size) stack_size.unwrap_spec = [ObjSpace, int] + +def _count(space): + """_count() -> integer +Return the number of currently running Python threads, excluding +the main thread. The returned number comprises all threads created +through `start_new_thread()` as well as `threading.Thread`, and not +yet finished. + +This function is meant for internal and specialized purposes only. 
+In most applications `threading.enumerate()` should be used instead.""" + return space.wrap(bootstrapper.nbthreads) + Modified: pypy/branch/fast-forward/pypy/module/thread/test/test_thread.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/thread/test/test_thread.py (original) +++ pypy/branch/fast-forward/pypy/module/thread/test/test_thread.py Sun Sep 26 23:53:28 2010 @@ -39,6 +39,20 @@ self.waitfor(lambda: feedback) assert feedback == [42] + def test_thread_count(self): + import thread, time + feedback = [] + please_start = [] + def f(): + feedback.append(42) + self.waitfor(lambda: please_start) + assert thread._count() == 0 + thread.start_new_thread(f, ()) + self.waitfor(lambda: feedback) + assert thread._count() == 1 + please_start.append(1) # trigger + # XXX joining a thread seems difficult at applevel. + def test_start_new_thread_args(self): import thread def f(): From fijal at codespeak.net Mon Sep 27 10:34:54 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 27 Sep 2010 10:34:54 +0200 (CEST) Subject: [pypy-svn] r77399 - pypy/extradoc/talk/pepm2011 Message-ID: <20100927083454.2B7E9282BAD@codespeak.net> Author: fijal Date: Mon Sep 27 10:34:52 2010 New Revision: 77399 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: use more official form Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Mon Sep 27 10:34:52 2010 @@ -54,7 +54,7 @@ \title{Escape Analysis and Specialization in a Tracing JIT} -\authorinfo{Carl Friedrich Bolz \and Antonio Cuni \and Maciek Fija?kowski \and Samuele Pedroni \and Armin Rigo} +\authorinfo{Carl Friedrich Bolz \and Antonio Cuni \and Maciej Fija?kowski \and Samuele Pedroni \and Armin Rigo} {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany XXX} {cfbolz at gmx.de XXX} From arigo at codespeak.net Mon Sep 27 11:46:07 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 11:46:07 +0200 (CEST) Subject: [pypy-svn] r77400 - in pypy/branch/smaller-writebarrier/pypy: rpython/lltypesystem translator/c/test Message-ID: <20100927094607.0A0DB282C02@codespeak.net> Author: arigo Date: Mon Sep 27 11:46:06 2010 New Revision: 77400 Modified: pypy/branch/smaller-writebarrier/pypy/rpython/lltypesystem/llarena.py pypy/branch/smaller-writebarrier/pypy/translator/c/test/test_lltyped.py Log: Have the arena malloc and free functions map directly to malloc() and free() in C; and a minimal test. 
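The mechanism used in the patch below is rffi.llexternal, which declares an external C function so that the translated code calls it directly. The two declarations added by the patch look like this (shown here for readability, assuming the usual rffi/lltype/llmemory imports available in llarena.py):

llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address,
                                sandboxsafe=True, _nowrapper=True)
llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void,
                              sandboxsafe=True, _nowrapper=True)

With _nowrapper=True no extra RPython wrapper is generated around the call, so after translation arena_malloc() and arena_free() become plain calls to the C-level malloc() and free().
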
Modified: pypy/branch/smaller-writebarrier/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/lltypesystem/llarena.py Mon Sep 27 11:46:06 2010 @@ -472,8 +472,13 @@ clear_large_memory_chunk = llmemory.raw_memclear +llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, + sandboxsafe=True, _nowrapper=True) +llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void, + sandboxsafe=True, _nowrapper=True) + def llimpl_arena_malloc(nbytes, zero): - addr = llmemory.raw_malloc(nbytes) + addr = llimpl_malloc(nbytes) if zero and bool(addr): clear_large_memory_chunk(addr, nbytes) return addr @@ -483,11 +488,8 @@ llfakeimpl=arena_malloc, sandboxsafe=True) -def llimpl_arena_free(arena_addr): - # NB. minimark.py assumes that arena_free() is actually just a raw_free(). - llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', - llimpl=llimpl_arena_free, + llimpl=llimpl_free, llfakeimpl=arena_free, sandboxsafe=True) Modified: pypy/branch/smaller-writebarrier/pypy/translator/c/test/test_lltyped.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/translator/c/test/test_lltyped.py (original) +++ pypy/branch/smaller-writebarrier/pypy/translator/c/test/test_lltyped.py Mon Sep 27 11:46:06 2010 @@ -783,6 +783,17 @@ res = fn() assert res == 42 + def test_llarena(self): + from pypy.rpython.lltypesystem import llmemory, llarena + # + def f(): + a = llarena.arena_malloc(800, False) + llarena.arena_reset(a, 800, 2) + llarena.arena_free(a) + # + fn = self.getcompiled(f, []) + fn() + def test_padding_in_prebuilt_struct(self): from pypy.rpython.lltypesystem import rffi from pypy.rpython.tool import rffi_platform From arigo at codespeak.net Mon Sep 27 13:08:17 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 13:08:17 +0200 (CEST) Subject: [pypy-svn] r77401 - in pypy/branch/smaller-writebarrier/pypy: rlib rpython/memory/gc/test Message-ID: <20100927110817.B6A94282C02@codespeak.net> Author: arigo Date: Mon Sep 27 13:08:16 2010 New Revision: 77401 Modified: pypy/branch/smaller-writebarrier/pypy/rlib/rstring.py pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_minimark.py Log: Kill this test which does not make sense any more. Restore the default of 100 in rstring.py. Modified: pypy/branch/smaller-writebarrier/pypy/rlib/rstring.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rlib/rstring.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rlib/rstring.py Mon Sep 27 13:08:16 2010 @@ -46,9 +46,7 @@ # -------------- public API --------------------------------- -# the following number is the maximum size of an RPython unicode -# string that goes into the nursery of the minimark GC. 
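For context, INIT_SIZE is the number of characters that the rlib.rstring builders reserve up front; it had previously been shrunk to 56 so that the preallocated buffer would still fit the minimark nursery, a constraint that the reworked nursery limits on this branch remove. A toy illustration of such a preallocating builder follows; it is simplified and is not the real rlib.rstring code:

INIT_SIZE = 100   # default up-front capacity

class ToyStringBuilder(object):
    # simplified stand-in: reserve 'init_size' characters, grow on demand
    def __init__(self, init_size=INIT_SIZE):
        self._buf = [' '] * init_size
        self._size = 0

    def append(self, s):
        needed = self._size + len(s)
        if needed > len(self._buf):
            self._buf.extend([' '] * (needed - len(self._buf)))
        for i in range(len(s)):
            self._buf[self._size + i] = s[i]
        self._size = needed

    def build(self):
        return ''.join(self._buf[:self._size])
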
-INIT_SIZE = 56 +INIT_SIZE = 100 # XXX tweak class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/test/test_minimark.py Mon Sep 27 13:08:16 2010 @@ -5,26 +5,6 @@ # Note that most tests are in test_direct.py. -def test_stringbuilder_default_initsize_is_small(): - # Check that pypy.rlib.rstring.INIT_SIZE is short enough to let - # the allocated object be considered as a "small" object. - # Otherwise it would not be allocated in the nursery at all, - # which is kind of bad (and also prevents shrink_array() from - # being useful). - from pypy.rlib.rstring import INIT_SIZE - from pypy.rpython.lltypesystem.rstr import STR, UNICODE - # - size_gc_header = llmemory.raw_malloc_usage( - llmemory.sizeof(llmemory.Address)) - # - size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE)) - size1 = size_gc_header + size1 - assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - # - size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) - size2 = size_gc_header + size2 - assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - def test_card_marking_words_for_length(): gc = MiniMarkGC(None, card_page_indices=128) assert gc.card_page_shift == 7 From fijal at codespeak.net Mon Sep 27 13:32:39 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Mon, 27 Sep 2010 13:32:39 +0200 (CEST) Subject: [pypy-svn] r77402 - pypy/extradoc/planning Message-ID: <20100927113239.9C7F0282C02@codespeak.net> Author: fijal Date: Mon Sep 27 13:32:38 2010 New Revision: 77402 Modified: pypy/extradoc/planning/jit.txt Log: 2 tasks Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Mon Sep 27 13:32:38 2010 @@ -27,6 +27,13 @@ - maybe refactor a bit the x86 backend, particularly the register allocation +- work more on visualization tools for traces or any profiler (unusable outside + the core group) + +- think about having different bytecode for "xyz %s" % stuff when left side + is a compile time constant (and call unrolled version of string formatting + loop in this case). 
+ OPTIMIZATIONS ------------- From arigo at codespeak.net Mon Sep 27 13:43:34 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 13:43:34 +0200 (CEST) Subject: [pypy-svn] r77403 - in pypy/trunk/pypy: jit/backend/llgraph jit/backend/llsupport jit/backend/llsupport/test jit/backend/test jit/backend/x86 jit/metainterp rlib rpython/lltypesystem rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test translator/c/test Message-ID: <20100927114334.4FA07282C02@codespeak.net> Author: arigo Date: Mon Sep 27 13:43:31 2010 New Revision: 77403 Added: pypy/trunk/pypy/rpython/memory/gc/inspector.py - copied unchanged from r77400, pypy/branch/smaller-writebarrier/pypy/rpython/memory/gc/inspector.py Removed: pypy/trunk/pypy/rpython/memory/gc/inspect.py Modified: pypy/trunk/pypy/jit/backend/llgraph/llimpl.py pypy/trunk/pypy/jit/backend/llsupport/gc.py pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py pypy/trunk/pypy/jit/backend/test/runner_test.py pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/regalloc.py pypy/trunk/pypy/jit/metainterp/resoperation.py pypy/trunk/pypy/rlib/rstring.py pypy/trunk/pypy/rpython/lltypesystem/llarena.py pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/generation.py pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py pypy/trunk/pypy/rpython/memory/gctransform/framework.py pypy/trunk/pypy/rpython/memory/gcwrapper.py pypy/trunk/pypy/rpython/memory/test/test_gc.py pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py pypy/trunk/pypy/translator/c/test/test_lltyped.py Log: Merge branch/smaller-writebarrier. Reduce the number of arguments passed to the write barrier, and tweak the minimark GC a bit more. 
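The core of the merge is visible in the GC and JIT diffs that follow: the write barrier no longer receives the value being stored, only the object being written into, so the JIT's COND_CALL_GC_WB operation shrinks from two arguments to one. As a simplified sketch (mirroring the generation.py hunk further down, not a complete implementation), the fast path changes like this:

# before: both the mutated object and the new value were passed
#   def write_barrier(self, newvalue, addr_struct):
#       if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
#           self.remember_young_pointer(addr_struct, newvalue)

# after: the checks on the new value move out of the fast path, so the
# barrier only needs the address of the object being mutated
def write_barrier(self, addr_struct):
    if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
        self.remember_young_pointer(addr_struct)
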
Modified: pypy/trunk/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/trunk/pypy/jit/backend/llgraph/llimpl.py Mon Sep 27 13:43:31 2010 @@ -129,7 +129,7 @@ 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), 'call_assembler' : (('varargs',), 'intorptr'), - 'cond_call_gc_wb' : (('ptr', 'ptr'), None), + 'cond_call_gc_wb' : (('ptr',), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), 'guard_true' : (('bool',), None), @@ -810,7 +810,7 @@ FLOAT: 0.0} return d[calldescr.typeinfo] - def op_cond_call_gc_wb(self, descr, a, b): + def op_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") def op_oosend(self, descr, obj, *args): Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Mon Sep 27 13:43:31 2010 @@ -404,7 +404,7 @@ self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( [lltype.Signed, lltype.Signed], llmemory.GCREF)) self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address, llmemory.Address], lltype.Void)) + [llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) # def malloc_array(itemsize, tid, num_elem): @@ -550,8 +550,7 @@ # the GC, and call it immediately llop1 = self.llop1 funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR) - funcptr(llmemory.cast_ptr_to_adr(gcref_struct), - llmemory.cast_ptr_to_adr(gcref_newptr)) + funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) def rewrite_assembler(self, cpu, operations): # Perform two kinds of rewrites in parallel: @@ -590,22 +589,24 @@ v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) + self._gen_write_barrier(newops, op.getarg(0)) op = op.copy_and_change(rop.SETFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) + # XXX detect when we should produce a + # write_barrier_from_array + self._gen_write_barrier(newops, op.getarg(0)) op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- newops.append(op) del operations[:] operations.extend(newops) - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] + def _gen_write_barrier(self, newops, v_base): + args = [v_base] newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=self.write_barrier_descr)) Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Mon Sep 27 13:43:31 2010 @@ -141,8 +141,8 @@ repr(offset_to_length), p)) return p - def _write_barrier_failing_case(self, adr_struct, adr_newptr): - self.record.append(('barrier', adr_struct, adr_newptr)) + def _write_barrier_failing_case(self, adr_struct): + self.record.append(('barrier', adr_struct)) def get_write_barrier_failing_case(self, FPTRTYPE): return llhelper(FPTRTYPE, self._write_barrier_failing_case) @@ -238,7 +238,6 
@@ s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) r_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, r) s_adr = llmemory.cast_ptr_to_adr(s) - r_adr = llmemory.cast_ptr_to_adr(r) # s_hdr.tid &= ~gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) @@ -246,7 +245,7 @@ # s_hdr.tid |= gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) - assert self.llop1.record == [('barrier', s_adr, r_adr)] + assert self.llop1.record == [('barrier', s_adr)] def test_gen_write_barrier(self): gc_ll_descr = self.gc_ll_descr @@ -254,13 +253,11 @@ # newops = [] v_base = BoxPtr() - v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + gc_ll_descr._gen_write_barrier(newops, v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert isinstance(wbdescr.jit_wb_if_flag, int) @@ -360,7 +357,6 @@ # assert operations[0].getopnum() == rop.COND_CALL_GC_WB assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value assert operations[0].result is None # assert operations[1].getopnum() == rop.SETFIELD_RAW @@ -384,7 +380,6 @@ # assert operations[0].getopnum() == rop.COND_CALL_GC_WB assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value assert operations[0].result is None # assert operations[1].getopnum() == rop.SETARRAYITEM_RAW Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/runner_test.py (original) +++ pypy/trunk/pypy/jit/backend/test/runner_test.py Mon Sep 27 13:43:31 2010 @@ -1406,12 +1406,12 @@ assert not excvalue def test_cond_call_gc_wb(self): - def func_void(a, b): - record.append((a, b)) + def func_void(a): + record.append(a) record = [] # S = lltype.GcStruct('S', ('tid', lltype.Signed)) - FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void) + FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) class WriteBarrierDescr(AbstractDescr): @@ -1432,10 +1432,10 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstInt(-2121)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: - assert record == [(s, -2121)] + assert record == [s] else: assert record == [] Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Mon Sep 27 13:43:31 2010 @@ -1765,6 +1765,7 @@ jz_location = self.mc.get_relative_pos() # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. + # XXX improve a bit, particularly for IS_X86_64. 
for i in range(len(arglocs)-1, -1, -1): loc = arglocs[i] if isinstance(loc, RegLoc): @@ -1777,12 +1778,11 @@ self.mc.PUSH_i32(loc.getint()) if IS_X86_64: - # We clobber these registers to pass the arguments, but that's + # We clobber this register to pass the arguments, but that's # okay, because consider_cond_call_gc_wb makes sure that any # caller-save registers with values in them are present in arglocs, # so they are saved on the stack above and restored below self.mc.MOV_rs(edi.value, 0) - self.mc.MOV_rs(esi.value, 8) # misaligned stack in the call, but it's ok because the write barrier # is not going to call anything more. Also, this assumes that the Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Mon Sep 27 13:43:31 2010 @@ -696,13 +696,9 @@ def consider_cond_call_gc_wb(self, op): assert op.result is None args = op.getarglist() - loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args) - # ^^^ we force loc_newvalue in a reg (unless it's a Const), - # because it will be needed anyway by the following setfield_gc. - # It avoids loading it twice from the memory. loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) - arglocs = [loc_base, loc_newvalue] + arglocs = [loc_base] # add eax, ecx and edx as extra "arguments" to ensure they are # saved and restored. Fish in self.rm to know which of these # registers really need to be saved (a bit of a hack). Moreover, Modified: pypy/trunk/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/resoperation.py (original) +++ pypy/trunk/pypy/jit/metainterp/resoperation.py Mon Sep 27 13:43:31 2010 @@ -456,7 +456,7 @@ 'UNICODESETITEM/3', 'NEWUNICODE/1', #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) + 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend Modified: pypy/trunk/pypy/rlib/rstring.py ============================================================================== --- pypy/trunk/pypy/rlib/rstring.py (original) +++ pypy/trunk/pypy/rlib/rstring.py Mon Sep 27 13:43:31 2010 @@ -46,9 +46,7 @@ # -------------- public API --------------------------------- -# the following number is the maximum size of an RPython unicode -# string that goes into the nursery of the minimark GC. 
-INIT_SIZE = 56 +INIT_SIZE = 100 # XXX tweak class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Mon Sep 27 13:43:31 2010 @@ -472,8 +472,13 @@ clear_large_memory_chunk = llmemory.raw_memclear +llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, + sandboxsafe=True, _nowrapper=True) +llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void, + sandboxsafe=True, _nowrapper=True) + def llimpl_arena_malloc(nbytes, zero): - addr = llmemory.raw_malloc(nbytes) + addr = llimpl_malloc(nbytes) if zero and bool(addr): clear_large_memory_chunk(addr, nbytes) return addr @@ -483,11 +488,8 @@ llfakeimpl=arena_malloc, sandboxsafe=True) -def llimpl_arena_free(arena_addr): - # NB. minimark.py assumes that arena_free() is actually just a raw_free(). - llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', - llimpl=llimpl_arena_free, + llimpl=llimpl_free, llfakeimpl=arena_free, sandboxsafe=True) Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Mon Sep 27 13:43:31 2010 @@ -76,7 +76,7 @@ def set_root_walker(self, root_walker): self.root_walker = root_walker - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): pass def statistics(self, index): Modified: pypy/trunk/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/generation.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/generation.py Mon Sep 27 13:43:31 2010 @@ -321,7 +321,7 @@ addr = pointer.address[0] newaddr = self.copy(addr) pointer.address[0] = newaddr - self.write_into_last_generation_obj(obj, newaddr) + self.write_into_last_generation_obj(obj) # ____________________________________________________________ # Implementation of nursery-only collections @@ -452,11 +452,12 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) def _setup_wb(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -464,33 +465,24 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). 
- def remember_young_pointer(addr_struct, addr): + def remember_young_pointer(addr_struct): #llop.debug_print(lltype.Void, "\tremember_young_pointer", # addr_struct, "<-", addr) - ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - self.write_into_last_generation_obj(addr_struct, addr) + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + self.old_objects_pointing_to_young.append(addr_struct) + self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS + self.write_into_last_generation_obj(addr_struct) remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer - def write_into_last_generation_obj(self, addr_struct, addr): + def write_into_last_generation_obj(self, addr_struct): objhdr = self.header(addr_struct) if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - if not self.is_last_generation(addr): - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.last_generation_root_objects.append(addr_struct) + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.last_generation_root_objects.append(addr_struct) + write_into_last_generation_obj._always_inline_ = True def assume_young_pointers(self, addr_struct): objhdr = self.header(addr_struct) Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Mon Sep 27 13:43:31 2010 @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage from pypy.rpython.memory.gc.base import GCBase, MovingGCBase from pypy.rpython.memory.gc import minimarkpage, base, generation from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -92,7 +93,8 @@ # PYPY_GC_NURSERY and fall back to half the size of # the L2 cache. For 'major_collection_threshold' it will look # it up in the env var PYPY_GC_MAJOR_COLLECT. It also sets - # 'max_heap_size' to PYPY_GC_MAX. + # 'max_heap_size' to PYPY_GC_MAX. Finally, PYPY_GC_MIN sets + # the minimal value of 'next_major_collection_threshold'. "read_from_env": True, # The size of the nursery. Note that this is only used as a @@ -108,10 +110,10 @@ "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger are just allocated with raw_malloc(). The value - # chosen here is enough for a unicode string of length 56 (on 64-bits) - # or 60 (on 32-bits). See rlib.rstring.INIT_SIZE. - "small_request_threshold": 256-WORD, + # that are larger are just allocated with raw_malloc(). Note that + # the size limit for being first allocated in the nursery is much + # larger; see below. 
+ "small_request_threshold": 35*WORD, # Full collection threshold: after a major collection, we record # the total size consumed; and after every minor collection, if the @@ -125,7 +127,16 @@ # in regular arrays of pointers; more in arrays whose items are # larger. A value of 0 disables card marking. "card_page_indices": 128, - "card_page_indices_min": 800, # minimum number of indices for cards + + # Objects whose total size is at least 'large_object' bytes are + # allocated out of the nursery immediately. If the object + # has GC pointers in its varsized part, we use instead the + # higher limit 'large_object_gcptrs'. The idea is that + # separately allocated objects are allocated immediately "old" + # and it's not good to have too many pointers from old to young + # objects. + "large_object": 1600*WORD, + "large_object_gcptrs": 8250*WORD, } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, @@ -136,7 +147,8 @@ small_request_threshold=5*WORD, major_collection_threshold=2.5, card_page_indices=0, - card_page_indices_min=None, + large_object=8*WORD, + large_object_gcptrs=10*WORD, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) assert small_request_threshold % WORD == 0 @@ -145,16 +157,23 @@ self.small_request_threshold = small_request_threshold self.major_collection_threshold = major_collection_threshold self.num_major_collects = 0 + self.min_heap_size = 0.0 self.max_heap_size = 0.0 self.max_heap_size_already_raised = False # self.card_page_indices = card_page_indices if self.card_page_indices > 0: - self.card_page_indices_min = card_page_indices_min self.card_page_shift = 0 while (1 << self.card_page_shift) < self.card_page_indices: self.card_page_shift += 1 # + # 'large_object' and 'large_object_gcptrs' limit how big objects + # can be in the nursery, so they give a lower bound on the allowed + # size of the nursery. + self.nonlarge_max = large_object - 1 + self.nonlarge_gcptrs_max = large_object_gcptrs - 1 + assert self.nonlarge_max <= self.nonlarge_gcptrs_max + # self.nursery = NULL self.nursery_free = NULL self.nursery_top = NULL @@ -218,7 +237,7 @@ else: # defaultsize = self.nursery_size - minsize = 18 * self.small_request_threshold + minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # @@ -229,28 +248,37 @@ newsize = generation.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize + newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll >= 1.0: self.major_collection_threshold = major_coll # + min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') + if min_heap_size > 0: + self.min_heap_size = float(min_heap_size) + else: + # defaults to 8 times the nursery + self.min_heap_size = newsize * 8 + # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) - self.nursery_size = max(newsize, minsize) + self.nursery_size = newsize self.allocate_nursery() def allocate_nursery(self): debug_start("gc-set-nursery-size") debug_print("nursery size:", self.nursery_size) - # the start of the nursery: we actually allocate a tiny bit more for + # the start of the nursery: we actually allocate a bit more for # the nursery than really needed, to simplify pointer arithmetic - # in malloc_fixedsize_clear(). - extra = self.small_request_threshold + # in malloc_fixedsize_clear(). 
The few extra pages are never used + # anyway so it doesn't even count. + extra = self.nonlarge_gcptrs_max + 1 self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) if not self.nursery: raise MemoryError("cannot allocate nursery") @@ -258,37 +286,54 @@ self.nursery_free = self.nursery # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size - # initialize the threshold, a bit arbitrarily - self.next_major_collection_threshold = ( - self.nursery_size * self.major_collection_threshold) + # initialize the threshold + self.min_heap_size = max(self.min_heap_size, self.nursery_size * + self.major_collection_threshold) + self.set_major_threshold_from(0.0) debug_stop("gc-set-nursery-size") + def set_major_threshold_from(self, threshold): + # Set the next_major_collection_threshold. + if threshold < self.min_heap_size: + threshold = self.min_heap_size + # + if self.max_heap_size > 0.0 and threshold > self.max_heap_size: + threshold = self.max_heap_size + bounded = True + else: + bounded = False + # + self.next_major_collection_threshold = threshold + return bounded + def malloc_fixedsize_clear(self, typeid, size, can_collect=True, needs_finalizer=False, contains_weakptr=False): ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size - rawtotalsize = llmemory.raw_malloc_usage(totalsize) + rawtotalsize = raw_malloc_usage(totalsize) # # If the object needs a finalizer, ask for a rawmalloc. # The following check should be constant-folded. if needs_finalizer: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - result = self.malloc_with_finalizer(typeid, totalsize) + obj = self.external_malloc(typeid, 0) + self.objects_with_finalizers.append(obj) # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. The following check should be constant-folded. - elif rawtotalsize > self.small_request_threshold: + # If totalsize is greater than nonlarge_max (which should never be + # the case in practice), ask for a rawmalloc. The following check + # should be constant-folded. + elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - result = self._external_malloc(typeid, totalsize) + obj = self.external_malloc(typeid, 0) # else: # If totalsize is smaller than minimal_size_in_nursery, round it # up. The following check should also be constant-folded. - min_size = llmemory.raw_malloc_usage(self.minimal_size_in_nursery) + min_size = raw_malloc_usage(self.minimal_size_in_nursery) if rawtotalsize < min_size: totalsize = rawtotalsize = min_size # @@ -306,8 +351,10 @@ # If it is a weakref, record it (check constant-folded). if contains_weakptr: self.young_objects_with_weakrefs.append(result+size_gc_header) + # + obj = result + size_gc_header # - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_clear(self, typeid, length, size, itemsize, @@ -315,32 +362,41 @@ ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. 
- if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - result = self._external_malloc_cardmark(typeid, totalsize, length) + # Compute the maximal length that makes the object still + # below 'nonlarge_max'. All the following logic is usually + # constant-folded because self.nonlarge_max, size and itemsize + # are all constants (the arguments are constant due to + # inlining) and self.has_gcptr_in_varsize() is constant-folded. + if self.has_gcptr_in_varsize(typeid): + nonlarge_max = self.nonlarge_gcptrs_max + else: + nonlarge_max = self.nonlarge_max + + if not raw_malloc_usage(itemsize): + too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max + else: + maxlength = nonlarge_max - raw_malloc_usage(nonvarsize) + maxlength = maxlength // raw_malloc_usage(itemsize) + too_many_items = length > maxlength + + if too_many_items: + # + # If the total size of the object would be larger than + # 'nonlarge_max', then allocate it externally. + obj = self.external_malloc(typeid, length) # else: - # Round the size up to the next multiple of WORD. Note that - # this is done only if totalsize <= self.small_request_threshold, - # i.e. it cannot overflow, and it keeps the property that - # totalsize <= self.small_request_threshold. + # With the above checks we know now that totalsize cannot be more + # than 'nonlarge_max'; in particular, the + and * cannot overflow. + totalsize = nonvarsize + itemsize * length totalsize = llarena.round_up_for_allocation(totalsize) - ll_assert(llmemory.raw_malloc_usage(totalsize) <= - self.small_request_threshold, - "round_up_for_allocation() rounded up too much?") # # 'totalsize' should contain at least the GC header and # the length word, so it should never be smaller than # 'minimal_size_in_nursery' - ll_assert(llmemory.raw_malloc_usage(totalsize) >= - llmemory.raw_malloc_usage(self.minimal_size_in_nursery), + ll_assert(raw_malloc_usage(totalsize) >= + raw_malloc_usage(self.minimal_size_in_nursery), "malloc_varsize_clear(): totalsize < minimalsize") # # Get the memory from the nursery. If there is not enough space @@ -353,10 +409,12 @@ # Build the object. llarena.arena_reserve(result, totalsize) self.init_gc_object(result, typeid, flags=0) + # + # Set the length and return the object. + obj = result + size_gc_header + (obj + offset_to_length).signed[0] = length # - # Set the length and return the object. - (result + size_gc_header + offset_to_length).signed[0] = length - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def collect(self, gen=1): @@ -389,105 +447,112 @@ collect_and_reserve._dont_inline_ = True - def _full_collect_if_needed(self, reserving_size): - reserving_size = llmemory.raw_malloc_usage(reserving_size) - if (float(self.get_total_memory_used()) + reserving_size > - self.next_major_collection_threshold): - self.minor_collection() - self.major_collection(reserving_size) - - def _external_malloc(self, typeid, totalsize): - """Allocate a large object using raw_malloc().""" - return self._external_malloc_cardmark(typeid, totalsize, 0) - - - def _external_malloc_cardmark(self, typeid, totalsize, length): - """Allocate a large object using raw_malloc(), possibly as an - object with card marking enabled, if its length is large enough. 
- 'length' can be specified as 0 if the object is not varsized.""" + def external_malloc(self, typeid, length): + """Allocate a large object using the ArenaCollection or + raw_malloc(), possibly as an object with card marking enabled, + if it has gc pointers in its var-sized part. 'length' should be + specified as 0 if the object is not varsized. The returned + object is fully initialized and zero-filled.""" + # + # Compute the total size, carefully checking for overflows. + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + self.fixed_size(typeid) + if length == 0: + # this includes the case of fixed-size objects, for which we + # should not even ask for the varsize_item_sizes(). + totalsize = nonvarsize + else: + itemsize = self.varsize_item_sizes(typeid) + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError # # If somebody calls this function a lot, we must eventually # force a full collection. - self._full_collect_if_needed(totalsize) + if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) > + self.next_major_collection_threshold): + self.minor_collection() + self.major_collection(raw_malloc_usage(totalsize)) # - # Check if we need to introduce the card marker bits area. - if (self.card_page_indices <= 0 # <- this check is constant-folded - or length < self.card_page_indices_min # <- must be large enough - or not self.has_gcptr_in_varsize(typeid)): # <- must contain ptrs + # Check if the object would fit in the ArenaCollection. + if raw_malloc_usage(totalsize) <= self.small_request_threshold: # - # In these cases, we don't want a card marker bits area. - cardheadersize = 0 + # Yes. Round up 'totalsize' (it cannot overflow and it + # must remain <= self.small_request_threshold.) + totalsize = llarena.round_up_for_allocation(totalsize) + ll_assert(raw_malloc_usage(totalsize) <= + self.small_request_threshold, + "rounding up made totalsize > small_request_threshold") + # + # Allocate from the ArenaCollection and clear the memory returned. + result = self.ac.malloc(totalsize) + llmemory.raw_memclear(result, totalsize) extra_flags = 0 # else: - # Reserve N extra words containing card bits before the object. - extra_words = self.card_marking_words_for_length(length) - cardheadersize = WORD * extra_words - extra_flags = GCFLAG_HAS_CARDS - # - allocsize = cardheadersize + llmemory.raw_malloc_usage(totalsize) - # - # Allocate the object using arena_malloc(), which we assume here - # is just the same as raw_malloc(), but allows the extra flexibility - # of saying that we have extra words in the header. - arena = llarena.arena_malloc(allocsize, False) - if not arena: - raise MemoryError("cannot allocate large object") - # - # Clear it using method 2 of llarena.arena_reset(), which is the - # same as just a raw_memclear(). - llarena.arena_reset(arena, allocsize, 2) - # - # Reserve the card mark as a list of single bytes - # (the loop is empty in C). - i = 0 - while i < cardheadersize: - llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) - i += 1 - # - # Initialize the object. - result = arena + cardheadersize - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) - # - # Record the newly allocated object and its size. 
- size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_objects.append(result + size_gc_header) - return result - _external_malloc_cardmark._dont_inline_ = True - - - def _malloc_nonmovable(self, typeid, totalsize): - """Allocate an object non-movable.""" - # - rawtotalsize = llmemory.raw_malloc_usage(totalsize) - if rawtotalsize > self.small_request_threshold: + # No, so proceed to allocate it externally with raw_malloc(). + # Check if we need to introduce the card marker bits area. + if (self.card_page_indices <= 0 # <- this check is constant-folded + or not self.has_gcptr_in_varsize(typeid) or + raw_malloc_usage(totalsize) <= self.nonlarge_gcptrs_max): + # + # In these cases, we don't want a card marker bits area. + # This case also includes all fixed-size objects. + cardheadersize = 0 + extra_flags = 0 + # + else: + # Reserve N extra words containing card bits before the object. + extra_words = self.card_marking_words_for_length(length) + cardheadersize = WORD * extra_words + extra_flags = GCFLAG_HAS_CARDS + # + # Detect very rare cases of overflows + if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1) + - cardheadersize): + raise MemoryError("rare case of overflow") + # + # Now we know that the following computations cannot overflow. + # Note that round_up_for_allocation() is also needed to get the + # correct number added to 'rawmalloced_total_size'. + allocsize = (cardheadersize + raw_malloc_usage( + llarena.round_up_for_allocation(totalsize))) + # + # Allocate the object using arena_malloc(), which we assume here + # is just the same as raw_malloc(), but allows the extra + # flexibility of saying that we have extra words in the header. + arena = llarena.arena_malloc(allocsize, False) + if not arena: + raise MemoryError("cannot allocate large object") + # + # Clear it using method 2 of llarena.arena_reset(), which is the + # same as just a raw_memclear(). This also clears the card mark + # bits, if any. + llarena.arena_reset(arena, allocsize, 2) + # + # Reserve the card mark bits as a list of single bytes + # (the loop is empty in C). + i = 0 + while i < cardheadersize: + llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) + i += 1 # - # The size asked for is too large for the ArenaCollection. - return self._external_malloc(typeid, totalsize) - # - totalsize = llarena.round_up_for_allocation(totalsize) - # - # If somebody calls _malloc_nonmovable() a lot, we must eventually - # force a full collection. - self._full_collect_if_needed(totalsize) - # - # Ask the ArenaCollection to do the malloc. - result = self.ac.malloc(totalsize) - llmemory.raw_memclear(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) - return result - - - def malloc_with_finalizer(self, typeid, totalsize): - """Allocate an object with a finalizer.""" + # Reserve the actual object. (This is also a no-op in C). + result = arena + cardheadersize + llarena.arena_reserve(result, totalsize) + # + # Record the newly allocated object and its full malloced size. + self.rawmalloced_total_size += allocsize + self.rawmalloced_objects.append(result + size_gc_header) # - result = self._malloc_nonmovable(typeid, totalsize) - size_gc_header = self.gcheaderbuilder.size_gc_header - self.objects_with_finalizers.append(result + size_gc_header) - return result - malloc_with_finalizer._dont_inline_ = True + # Common code to fill the header and length of the object. 
+ self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) + if self.is_varsize(typeid): + offset_to_length = self.varsize_offset_to_length(typeid) + (result + size_gc_header + offset_to_length).signed[0] = length + return result + size_gc_header # ---------- @@ -529,37 +594,16 @@ def malloc_fixedsize_nonmovable(self, typeid): - """NOT_RPYTHON: not tested translated""" - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + self.fixed_size(typeid) - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header + obj = self.external_malloc(typeid, 0) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_nonmovable(self, typeid, length): - size_gc_header = self.gcheaderbuilder.size_gc_header - nonvarsize = size_gc_header + self.fixed_size(typeid) - itemsize = self.varsize_item_sizes(typeid) - offset_to_length = self.varsize_offset_to_length(typeid) - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header - (obj + offset_to_length).signed[0] = length + obj = self.external_malloc(typeid, length) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_nonmovable(self, typeid, length, zero): # helper for testing, same as GCBase.malloc - if self.is_varsize(typeid): - gcref = self.malloc_varsize_nonmovable(typeid, length) - else: - gcref = self.malloc_fixedsize_nonmovable(typeid) - return llmemory.cast_ptr_to_adr(gcref) + return self.external_malloc(typeid, length or 0) # None -> 0 # ---------- @@ -675,19 +719,19 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) - def write_barrier_from_array(self, newvalue, addr_array, index): + def write_barrier_from_array(self, addr_array, index): if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS: if self.card_page_indices > 0: # <- constant-folded - self.remember_young_pointer_from_array(addr_array, index, - newvalue) + self.remember_young_pointer_from_array(addr_array, index) else: - self.remember_young_pointer(addr_array, newvalue) + self.remember_young_pointer(addr_array) def _init_writebarrier_logic(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -695,30 +739,22 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). - def remember_young_pointer(addr_struct, addr): - # 'addr_struct' is the address of the object in which we write; - # 'addr' is the address that we write in 'addr_struct'. 
- ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # Core logic: if the 'addr' is in the nursery, then we need + def remember_young_pointer(addr_struct): + # 'addr_struct' is the address of the object in which we write. + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + # + # We assume that what we are writing is a pointer to the nursery + # (and don't care for the fact that this new pointer may not + # actually point to the nursery, which seems ok). What we need is # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object # to the list 'old_objects_pointing_to_young'. We know that # 'addr_struct' cannot be in the nursery, because nursery objects # never have the flag GCFLAG_NO_YOUNG_PTRS to start with. + self.old_objects_pointing_to_young.append(addr_struct) objhdr = self.header(addr_struct) - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return + objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS # # Second part: if 'addr_struct' is actually a prebuilt GC # object and it's the first time we see a write to it, we @@ -737,17 +773,16 @@ def _init_writebarrier_with_card_marker(self): - def remember_young_pointer_from_array(addr_array, index, addr): + def remember_young_pointer_from_array(addr_array, index): # 'addr_array' is the address of the object in which we write, # which must have an array part; 'index' is the index of the - # item that is (or contains) the pointer that we write; - # 'addr' is the address that we write in the array. + # item that is (or contains) the pointer that we write. objhdr = self.header(addr_array) if objhdr.tid & GCFLAG_HAS_CARDS == 0: # # no cards, use default logic. The 'nocard_logic()' is just # 'remember_young_pointer()', but forced to be inlined here. - nocard_logic(addr_array, addr) + nocard_logic(addr_array) return # # 'addr_array' is a raw_malloc'ed array with card markers @@ -764,22 +799,13 @@ if byte & bitmask: return # - # As in remember_young_pointer, check if 'addr' is a valid - # pointer, in case it can be a tagged integer - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # If the 'addr' is in the nursery, then we need to set the flag. - # Note that the following check is done after the bit check - # above, because it is expected that the "bit already set" - # situation is the most common. - if self.is_in_nursery(addr): - addr_byte.char[0] = chr(byte | bitmask) - # - if objhdr.tid & GCFLAG_CARDS_SET == 0: - self.old_objects_with_cards_set.append(addr_array) - objhdr.tid |= GCFLAG_CARDS_SET + # We set the flag (even if the newly written address does not + # actually point to the nursery -- like remember_young_pointer()). + addr_byte.char[0] = chr(byte | bitmask) + # + if objhdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET nocard_logic = func_with_new_name(self.remember_young_pointer, 'remember_young_pointer_nocard') @@ -997,7 +1023,7 @@ if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: # # Common case: allocate a new nonmovable location for it. 
- newhdr = self.ac.malloc(totalsize) + newhdr = self._malloc_out_of_nursery(totalsize) # else: # The object has already a shadow. @@ -1035,6 +1061,33 @@ self.old_objects_pointing_to_young.append(newobj) + def _malloc_out_of_nursery(self, totalsize): + """Allocate non-movable memory for an object of the given + 'totalsize' that lives so far in the nursery.""" + if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # most common path + return self.ac.malloc(totalsize) + else: + # for nursery objects that are not small + return self._malloc_out_of_nursery_nonsmall(totalsize) + _malloc_out_of_nursery._always_inline_ = True + + def _malloc_out_of_nursery_nonsmall(self, totalsize): + # 'totalsize' should be aligned. + ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0, + "misaligned totalsize in _malloc_out_of_nursery_nonsmall") + # + arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False) + if not arena: + raise MemoryError("cannot allocate object") + llarena.arena_reserve(arena, totalsize) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(arena + size_gc_header) + return arena + + # ---------- # Full collection @@ -1104,30 +1157,26 @@ # Set the threshold for the next major collection to be when we # have allocated 'major_collection_threshold' times more than # we currently have. - self.next_major_collection_threshold = ( + bounded = self.set_major_threshold_from( (self.get_total_memory_used() * self.major_collection_threshold) + reserving_size) # # Max heap size: gives an upper bound on the threshold. If we # already have at least this much allocated, raise MemoryError. - if (self.max_heap_size > 0.0 and - self.next_major_collection_threshold > self.max_heap_size): + if bounded and (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_threshold): # - self.next_major_collection_threshold = self.max_heap_size - if (float(self.get_total_memory_used()) + reserving_size >= - self.next_major_collection_threshold): - # - # First raise MemoryError, giving the program a chance to - # quit cleanly. It might still allocate in the nursery, - # which might eventually be emptied, triggering another - # major collect and (possibly) reaching here again with an - # even higher memory consumption. To prevent it, if it's - # the second time we are here, then abort the program. - if self.max_heap_size_already_raised: - llop.debug_fatalerror(lltype.Void, - "Using too much memory, aborting") - self.max_heap_size_already_raised = True - raise MemoryError + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError # # At the end, we can execute the finalizers of the objects # listed in 'run_finalizers'. 
Note that this will typically do @@ -1159,8 +1208,7 @@ self.rawmalloced_objects.append(obj) else: totalsize = size_gc_header + self.get_size(obj) - rawtotalsize = llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_total_size -= rawtotalsize + allocsize = raw_malloc_usage(totalsize) arena = llarena.getfakearenaaddress(obj - size_gc_header) # # Must also include the card marker area, if any @@ -1175,8 +1223,10 @@ length = (obj + offset_to_length).signed[0] extra_words = self.card_marking_words_for_length(length) arena -= extra_words * WORD + allocsize += extra_words * WORD # llarena.arena_free(arena) + self.rawmalloced_total_size -= allocsize # list.delete() @@ -1260,7 +1310,8 @@ else: size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) - shadowhdr = self.ac.malloc(size_gc_header + size) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) # initialize to an invalid tid *without* GCFLAG_VISITED, # so that if the object dies before the next minor # collection, the shadow will stay around but be collected @@ -1454,7 +1505,7 @@ self.total_memory_used = 0 def malloc(self, size): - nsize = llmemory.raw_malloc_usage(size) + nsize = raw_malloc_usage(size) ll_assert(nsize > 0, "malloc: size is null or negative") ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py Mon Sep 27 13:43:31 2010 @@ -86,19 +86,17 @@ def write(self, p, fieldname, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) setattr(p, fieldname, newvalue) def writearray(self, p, index, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) if hasattr(self.gc, 'write_barrier_from_array'): - self.gc.write_barrier_from_array(newaddr, addr_struct, index) + self.gc.write_barrier_from_array(addr_struct, index) else: - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -507,8 +505,7 @@ for index, expected_x in nums.items(): assert a[index].x == expected_x self.stackroots.pop() - test_card_marker.GC_PARAMS = {"card_page_indices": 4, - "card_page_indices_min": 7} + test_card_marker.GC_PARAMS = {"card_page_indices": 4} class TestMiniMarkGCFull(DirectGCTest): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py Mon Sep 27 13:43:31 2010 @@ -5,26 +5,6 @@ # Note that most tests are in test_direct.py. -def test_stringbuilder_default_initsize_is_small(): - # Check that pypy.rlib.rstring.INIT_SIZE is short enough to let - # the allocated object be considered as a "small" object. - # Otherwise it would not be allocated in the nursery at all, - # which is kind of bad (and also prevents shrink_array() from - # being useful). 
- from pypy.rlib.rstring import INIT_SIZE - from pypy.rpython.lltypesystem.rstr import STR, UNICODE - # - size_gc_header = llmemory.raw_malloc_usage( - llmemory.sizeof(llmemory.Address)) - # - size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE)) - size1 = size_gc_header + size1 - assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - # - size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) - size2 = size_gc_header + size2 - assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - def test_card_marking_words_for_length(): gc = MiniMarkGC(None, card_page_indices=128) assert gc.card_page_shift == 7 Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Mon Sep 27 13:43:31 2010 @@ -139,7 +139,7 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.memory.gc import inspect + from pypy.rpython.memory.gc import inspector super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): @@ -391,27 +391,27 @@ else: self.id_ptr = None - self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + self.get_rpy_roots_ptr = getfn(inspector.get_rpy_roots, [s_gc], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + self.get_rpy_referents_ptr = getfn(inspector.get_rpy_referents, [s_gc, s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + self.get_rpy_memory_usage_ptr = getfn(inspector.get_rpy_memory_usage, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + self.get_rpy_type_index_ptr = getfn(inspector.get_rpy_type_index, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + self.is_rpy_instance_ptr = getfn(inspector.is_rpy_instance, [s_gc, s_gcref], annmodel.SomeBool(), minimal_transform=False) - self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + self.dump_rpy_heap_ptr = getfn(inspector.dump_rpy_heap, [s_gc, annmodel.SomeInteger()], annmodel.s_Bool, minimal_transform=False) @@ -426,7 +426,6 @@ if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, - annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None, inline=True) @@ -435,15 +434,13 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress(), - annmodel.SomeAddress()], + [annmodel.SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, [s_gc, annmodel.SomeAddress(), - annmodel.SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -455,8 +452,7 @@ self.write_barrier_from_array_failing_case_ptr = \ getfn(func, [annmodel.SomeAddress(), - annmodel.SomeInteger(), - annmodel.SomeAddress()], + annmodel.SomeInteger()], annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], @@ 
-1023,8 +1019,6 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], - resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) if (self.write_barrier_from_array_ptr is not None and @@ -1034,14 +1028,12 @@ assert v_index.concretetype == lltype.Signed hop.genop("direct_call", [self.write_barrier_from_array_ptr, self.c_const_gc, - v_newvalue, v_structaddr, v_index]) else: self.write_barrier_calls += 1 hop.genop("direct_call", [self.write_barrier_ptr, self.c_const_gc, - v_newvalue, v_structaddr]) hop.rename('bare_' + opname) Modified: pypy/trunk/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/trunk/pypy/rpython/memory/gcwrapper.py Mon Sep 27 13:43:31 2010 @@ -94,7 +94,6 @@ assert (type(index) is int # <- fast path or lltype.typeOf(index) == lltype.Signed) self.gc.write_barrier_from_array( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer), index) wb = False @@ -102,7 +101,6 @@ # if wb: self.gc.write_barrier( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) Modified: pypy/trunk/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gc.py Mon Sep 27 13:43:31 2010 @@ -29,6 +29,7 @@ GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False + BUT_HOW_BIG_IS_A_BIG_STRING = 12 def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -495,7 +496,8 @@ # with larger numbers, it gets allocated outside the semispace # with some GCs. 
flag = self.GC_CAN_SHRINK_BIG_ARRAY - assert self.interpret(f, [12, 0, flag]) == 0x62024241 + bigsize = self.BUT_HOW_BIG_IS_A_BIG_STRING + assert self.interpret(f, [bigsize, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -770,7 +772,7 @@ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True + BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD class TestMiniMarkGCCardMarking(TestMiniMarkGC): - GC_PARAMS = {'card_page_indices': 4, - 'card_page_indices_min': 10} + GC_PARAMS = {'card_page_indices': 4} Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Mon Sep 27 13:43:31 2010 @@ -1475,7 +1475,6 @@ 'arena_size': 64*WORD, 'small_request_threshold': 5*WORD, 'card_page_indices': 4, - 'card_page_indices_min': 10, } root_stack_depth = 200 Modified: pypy/trunk/pypy/translator/c/test/test_lltyped.py ============================================================================== --- pypy/trunk/pypy/translator/c/test/test_lltyped.py (original) +++ pypy/trunk/pypy/translator/c/test/test_lltyped.py Mon Sep 27 13:43:31 2010 @@ -783,6 +783,17 @@ res = fn() assert res == 42 + def test_llarena(self): + from pypy.rpython.lltypesystem import llmemory, llarena + # + def f(): + a = llarena.arena_malloc(800, False) + llarena.arena_reset(a, 800, 2) + llarena.arena_free(a) + # + fn = self.getcompiled(f, []) + fn() + def test_padding_in_prebuilt_struct(self): from pypy.rpython.lltypesystem import rffi from pypy.rpython.tool import rffi_platform From arigo at codespeak.net Mon Sep 27 13:43:54 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 13:43:54 +0200 (CEST) Subject: [pypy-svn] r77404 - pypy/extradoc/planning Message-ID: <20100927114354.A7D78282C02@codespeak.net> Author: arigo Date: Mon Sep 27 13:43:53 2010 New Revision: 77404 Modified: pypy/extradoc/planning/jit.txt Log: Add a comment. Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Mon Sep 27 13:43:53 2010 @@ -92,6 +92,8 @@ - think out looking into functions or not, based on arguments, for example contains__Tuple should be unrolled if tuple is of constant length. HARD, blocked by the fact that we don't know constants soon enough + Also, an unrolled loop means several copies of the guards, which may + fail independently, leading to an exponential number of bridges - out-of-line guards (when an external change would invalidate existing pieces of assembler) From arigo at codespeak.net Mon Sep 27 13:44:57 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 13:44:57 +0200 (CEST) Subject: [pypy-svn] r77405 - pypy/branch/smaller-writebarrier Message-ID: <20100927114457.B2818282C02@codespeak.net> Author: arigo Date: Mon Sep 27 13:44:56 2010 New Revision: 77405 Removed: pypy/branch/smaller-writebarrier/ Log: Remove merged branch. 
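
[Editor's note, not part of the archived messages: the "smaller-writebarrier" branch merged in r77403 above and deleted here changes the generational/minimark write barrier from write_barrier(newvalue, addr_struct) to write_barrier(addr_struct), so the barrier no longer inspects the value being stored. The toy model below only restates the control flow visible in that diff -- a flag test on the fast path, conservative recording plus flag clearing on the slow path. All class and variable names are simplifications invented for this illustration; it is not PyPy source code.]

    GCFLAG_NO_YOUNG_PTRS = 0x1

    class ToyObject(object):
        def __init__(self):
            # freshly tenured object: assumed to contain no young pointers yet
            self.tid = GCFLAG_NO_YOUNG_PTRS

    class ToyGC(object):
        def __init__(self):
            self.old_objects_pointing_to_young = []

        def write_barrier(self, obj):
            # fast path, executed on every pointer store into 'obj'
            if obj.tid & GCFLAG_NO_YOUNG_PTRS:
                self.remember_young_pointer(obj)

        def remember_young_pointer(self, obj):
            # slow path: record 'obj' and clear the flag without looking at
            # the stored value (the simplification introduced in r77403)
            self.old_objects_pointing_to_young.append(obj)
            obj.tid &= ~GCFLAG_NO_YOUNG_PTRS

    gc = ToyGC()
    obj = ToyObject()
    gc.write_barrier(obj)   # first store into obj: slow path runs once
    gc.write_barrier(obj)   # later stores: flag already cleared, fast path only
    assert gc.old_objects_pointing_to_young == [obj]

[As the comments added to minimark.py in r77403 put it, the barrier now simply assumes the written value may point to the nursery instead of testing it, trading a little precision for a smaller, more JIT-friendly barrier.]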
From arigo at codespeak.net Mon Sep 27 13:50:12 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 13:50:12 +0200 (CEST) Subject: [pypy-svn] r77406 - pypy/branch/minimark-jit Message-ID: <20100927115012.10F2E282C02@codespeak.net> Author: arigo Date: Mon Sep 27 13:50:10 2010 New Revision: 77406 Added: pypy/branch/minimark-jit/ - copied from r77405, pypy/trunk/ Log: A quick branch to test and support the minimark GC with the JIT. From arigo at codespeak.net Mon Sep 27 14:02:28 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 14:02:28 +0200 (CEST) Subject: [pypy-svn] r77407 - in pypy/branch/minimark-jit/pypy: jit/backend/llsupport jit/backend/llsupport/test jit/metainterp rpython/memory/gc Message-ID: <20100927120228.0E606282C02@codespeak.net> Author: arigo Date: Mon Sep 27 14:02:27 2010 New Revision: 77407 Modified: pypy/branch/minimark-jit/pypy/jit/backend/llsupport/gc.py pypy/branch/minimark-jit/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/minimark-jit/pypy/jit/metainterp/gc.py pypy/branch/minimark-jit/pypy/rpython/memory/gc/generation.py pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimark.py Log: Fixes. Modified: pypy/branch/minimark-jit/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/llsupport/gc.py Mon Sep 27 14:02:27 2010 @@ -133,7 +133,7 @@ # ____________________________________________________________ -# All code below is for the hybrid GC +# All code below is for the hybrid or minimark GC class GcRefList: @@ -167,7 +167,7 @@ def alloc_gcref_list(self, n): # Important: the GRREF_LISTs allocated are *non-movable*. This - # requires support in the gc (only the hybrid GC supports it so far). + # requires support in the gc (hybrid GC or minimark GC so far). if we_are_translated(): list = rgc.malloc_nonmovable(self.GCREF_LIST, n) assert list, "malloc_nonmovable failed!" 
@@ -350,8 +350,9 @@ self.translator = translator self.llop1 = llop1 - # we need the hybrid GC for GcRefList.alloc_gcref_list() to work - if gcdescr.config.translation.gc != 'hybrid': + # we need the hybrid or minimark GC for GcRefList.alloc_gcref_list() + # to work + if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) @@ -382,8 +383,7 @@ self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) (self.array_basesize, _, self.array_length_ofs) = \ symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) - min_ns = self.GCClass.TRANSLATION_PARAMS['min_nursery_size'] - self.max_size_of_young_obj = self.GCClass.get_young_fixedsize(min_ns) + self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() # make a malloc function, with three arguments def malloc_basic(size, tid): Modified: pypy/branch/minimark-jit/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/llsupport/test/test_gc.py Mon Sep 27 14:02:27 2010 @@ -149,11 +149,12 @@ class TestFramework: + gc = 'hybrid' def setup_method(self, meth): class config_: class translation: - gc = 'hybrid' + gc = self.gc gcrootfinder = 'asmgcc' gctransformer = 'framework' gcremovetypeptr = False @@ -387,3 +388,7 @@ assert operations[1].getarg(1) == v_index assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + + +class TestFrameworkMiniMark(TestFramework): + gc = 'minimark' Modified: pypy/branch/minimark-jit/pypy/jit/metainterp/gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/metainterp/gc.py (original) +++ pypy/branch/minimark-jit/pypy/jit/metainterp/gc.py Mon Sep 27 14:02:27 2010 @@ -19,6 +19,9 @@ class GC_hybrid(GcDescription): malloc_zero_filled = True +class GC_minimark(GcDescription): + malloc_zero_filled = True + def get_description(config): name = config.translation.gc Modified: pypy/branch/minimark-jit/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/minimark-jit/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/minimark-jit/pypy/rpython/memory/gc/generation.py Mon Sep 27 14:02:27 2010 @@ -147,6 +147,11 @@ def get_young_var_basesize(nursery_size): return nursery_size // 4 - 1 + @classmethod + def JIT_max_size_of_young_obj(cls): + min_nurs_size = cls.TRANSLATION_PARAMS['min_nursery_size'] + return cls.get_young_fixedsize(min_nurs_size) + def is_in_nursery(self, addr): ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, "odd-valued (i.e. 
tagged) pointer unexpected here") Modified: pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimark.py Mon Sep 27 14:02:27 2010 @@ -719,6 +719,10 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS + @classmethod + def JIT_max_size_of_young_obj(cls): + return cls.TRANSLATION_PARAMS['large_object'] + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: self.remember_young_pointer(addr_struct) From arigo at codespeak.net Mon Sep 27 14:11:04 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 14:11:04 +0200 (CEST) Subject: [pypy-svn] r77408 - pypy/branch/minimark-jit/pypy/jit/backend/x86/test Message-ID: <20100927121104.AE022282C02@codespeak.net> Author: arigo Date: Mon Sep 27 14:11:03 2010 New Revision: 77408 Modified: pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py Log: Change (arbitrarily) one of the test files to use minimark instead of the hybrid GC. Modified: pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py Mon Sep 27 14:11:03 2010 @@ -127,6 +127,8 @@ # ______________________________________________________________________ class TestCompileHybrid(object): + # Test suite using the hybrid GC. We should ideally also have one + # using the minimark GC, but these tests take forever... 
def setup_class(cls): funcs = [] name_to_func = {} Modified: pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py Mon Sep 27 14:11:03 2010 @@ -118,7 +118,7 @@ def _get_TranslationContext(self): t = TranslationContext() - t.config.translation.gc = 'hybrid' + t.config.translation.gc = 'minimark' t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True From cfbolz at codespeak.net Mon Sep 27 14:32:19 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Mon, 27 Sep 2010 14:32:19 +0200 (CEST) Subject: [pypy-svn] r77409 - pypy/extradoc/talk/pepm2011 Message-ID: <20100927123219.CE926282C02@codespeak.net> Author: cfbolz Date: Mon Sep 27 14:32:18 2010 New Revision: 77409 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: refactor a number of things, add many XXXs Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Mon Sep 27 14:32:18 2010 @@ -91,8 +91,12 @@ XXX drop the word "allocation removal" somewhere +XXX define "escape analysis" + \section{Introduction} +XXX need to re-target introduction a bit to fit PEPMs focus + The goal of a just-in-time compiler for a dynamic language is obviously to improve the speed of the language over an implementation of the language that uses interpretation. The first goal of a JIT is thus to remove the @@ -122,8 +126,9 @@ A recently popular approach to implementing just-in-time compilers for dynamic languages is that of a tracing JIT. A tracing JIT often takes the form of an -extension to an existing interpreter, which can be sped up that way. The PyPy -project is an environment for implementing dynamic programming languages. It's +extension to an existing interpreter, which can be sped up that way. This +approach is also the one taken by the PyPy project, which is an environment for +implementing dynamic programming languages. PyPy's approach to doing so is to straightforwardly implement an interpreter for the to-be-implemented language, and then use powerful tools to turn the interpreter into an efficient VM that also contains a just-in-time compiler. This compiler @@ -152,7 +157,7 @@ The contributions of this paper are: \begin{enumerate} - \item An efficient and effective algorithm for removing objects allocations in a tracing JIT. + \item An efficient and effective algorithm for removing object allocations in a tracing JIT. \item XXX \end{enumerate} @@ -232,6 +237,7 @@ on. These guards are the only mechanism to stop the execution of a trace, the loop end condition also takes the form of a guard. +bridges? arguments to traces @@ -316,10 +322,15 @@ return res \end{verbatim} -The loop iterates \texttt{y} times, and computes something in the process. To -understand the reason why executing this function is slow, here is the trace -that is produced by the tracing JIT when executing the function with \texttt{y} -being a \texttt{BoxedInteger}: XXX make it clear that this is really a trace specific for BoxedInteger +The loop iterates \texttt{y} times, and computes something in the process. 
+Simply running this function is slow, because there are lots of virtual method +calls inside the loop, one for each \texttt{is\_positive} and even two for each +call to \texttt{add}. These method calls need to check the type of the involved +objects repeatedly and redundantly. In addition, a lot of objects are created +when executing that loop, many of these objects do not survive for very long. +The actual computation that is performed by \texttt{f} is simply a number of +float or integer additions. + \begin{figure} \begin{verbatim} @@ -378,17 +389,34 @@ guard_true(i17) jump(p15, p10) \end{verbatim} -\caption{unoptimized trace for the simple object model} +\label{fig:unopt-trace} +\caption{Unoptimized Trace for the Simple Object Model} \end{figure} -(indentation corresponds to the stack level of the traced functions). +If the function is executed using the tracing JIT, with \texttt{y} being a +\texttt{BoxedInteger}, the produced trace looks like +Figure~\ref{fig:unopt-trace}. The operations in the trace are indented to +correspond to the stack level of the function that contains the traced +operation. The trace also shows the inefficiencies of \texttt{f} clearly, if one +looks at the number of \texttt{new}, \texttt{set/getfield\_gc} and +\texttt{guard\_class} operations. + +Note how the functions that are called by \texttt{f} are automatically inlined +into the trace. The method calls are always preceded by a \texttt{guard\_class} +operation, to check that the class of the receiver is the same as the one that +was observed during tracing.\footnote{\texttt{guard\_class} performs a precise +class check, not checking for subclasses} These guards make the trace specific +to the situation where \texttt{y} is really a \texttt{BoxedInteger}, it can +already be said to be specialized for \texttt{BoxedIntegers}. When the trace is +turned into machine code and then executed with \texttt{BoxedFloats}, the +first \texttt{guard\_class} instruction will fail and execution will continue +using the interpreter. -The trace is inefficient for a couple of reasons. One problem is that it checks -repeatedly and redundantly for the class of the objects around, using a -\texttt{guard\_class} instruction. In addition, some new \texttt{BoxedInteger} instances are -constructed using the \texttt{new} operation, only to be used once and then forgotten -a bit later. In the next section, we will see how this can be improved upon, -using escape analysis. +XXX simplify traces a bit more +get rid of \_gc suffix in set/getfield\_gc + +In the next section, we will see how this can be improved upon, using escape +analysis. XXX \section{Object Lifetimes in a Tracing JIT} \label{sec:lifetimes} @@ -400,38 +428,53 @@ tracing JIT compiler. \begin{figure} -\includegraphics{figures/obj-lifetime.pdf} +\includegraphics[scale=0.7]{figures/obj-lifetime.pdf} \caption{Object Lifetimes in a Trace} \label{fig:lifetimes} \end{figure} -The figure shows a trace before optimization, together with the lifetime of -various kinds of objects created in the trace. It is executed from top to -bottom. At the bottom, a jump is used to execute the same loop another time. -For clarity, the figure shows two iterations of the loop. -The loop is executed until one of the guards in the trace fails, and the -execution is aborted. - -Some of the operations within this trace are \texttt{new} operations, which each create a -new instance of some class. These instances are used for a while, e.g. by -calling methods on them, reading and writing their fields. 
Some of these -instances escape, which means that they are stored in some globally accessible -place or are passed into a function. +Figure~\ref{fig:lifetimes} shows a trace before optimization, together with the +lifetime of various kinds of objects created in the trace. It is executed from +top to bottom. At the bottom, a jump is used to execute the same loop another +time (for clarity, the figure shows two iterations of the loop). The loop is +executed until one of the guards in the trace fails, and the execution is +aborted and interpretation resumes. + +Some of the operations within this trace are \texttt{new} operations, which each +create a new instance of some class. These instances are used for a while, e.g. +by calling methods on them (which are inlined into the trace), reading and +writing their fields. Some of these instances \emph{escape}, which means that +they are stored in some globally accessible place or are passed into a function. Together with the \texttt{new} operations, the figure shows the lifetimes of the -created objects. Objects in category 1 live for a while, and are then just not -used any more. The creation of these objects is removed by the -optimization described in the last section. +created objects. The objects that are created within a trace using \texttt{new} +fall into one of several categories: -Objects in category 2 live for a while and then escape. The optimization of the -last section deals with them too: the \texttt{new} that creates them and -the field accesses are deferred, until the point where the object escapes. +\begin{itemize} + \item Category 1: Objects that live for a while, and are then just not + used any more. + + \item Category 2: Objects that live for a while and then escape. + + \item Category 3: Objects that live for a while, survive across the jump to + the beginning of the loop, and are then not used any more. + + \item Category 4: Objects that live for a while, survive across the jump, + and then escape. To these we also count the objects that live across several + jumps and then either escape or stop being used\footnote{In theory, the + approach of Section~\ref{sec:XXX} works also for objects that live for + exactly $n>1$ iterations and then don't escape, but we expect this to be a + very rare case, so we do not handle it.} +\end{itemize} + +The objects that are allocated in the example trace in +Figure~\ref{fig:unopt-trace} fall into categories 1 and 3. Objects stored in +\texttt{p5, p6, p11 XXX} are in category 1, objects in \texttt{p10, p15} are in +category 3. -The objects in category 3 and 4 are in principle like the objects in category 1 -and 2. They are created, live for a while, but are then passed as an argument -to the \texttt{jump} operation. In the next iteration they can either die (category -3) or escape (category 4). +The creation of objects in category 1 is removed by the optimization described +in Section~\ref{sec:virtuals}. XXX \section{Escape Analysis in a Tracing JIT} \label{sec:virtuals} @@ -439,26 +482,29 @@ \subsection{Virtual Objects} -The main insight to improve the code shown in the last section is that some of -the objects created in the trace using a \texttt{new} operation don't survive very -long and are collected by the garbage collector soon after their allocation. -Moreover, they are used only inside the loop, thus we can easily prove that -nobody else in the program stores a reference to them. 
The -idea for improving the code is thus to analyze which objects never escape the -loop and may thus not be allocated at all. +The main insight to improve the code shown in the last section is that objects +in category 1 don't survive very long and are collected by the garbage collector +soon after their allocation. Moreover, they are used only inside the loop and +nobody else in the program stores a reference to them. The idea for improving +the code is thus to analyze which objects fall in category 1 and which may thus +not be allocated at all. + +XXX is "symbolic execution" the right word to drop? This process is called \emph{escape analysis}. The escape analysis of our tracing JIT works by using \emph{virtual objects}: The trace is walked from beginning to end and whenever a \texttt{new} operation is seen, the operation is removed and a virtual object is constructed. The virtual object summarizes the shape of the object that is allocated at this position in the original trace, -and is used by the escape analysis to improve the trace. The shape describes +and is used by the optimization to improve the trace. The shapes describe where the values that would be stored in the fields of the allocated objects come from. Whenever the optimizer sees a \texttt{setfield} that writes into a virtual object, that shape summary is thus updated and the operation can be removed. When the optimizer encounters a \texttt{getfield} from a virtual, the result is read from the virtual object, and the operation is also removed. +XXX what happens on a guard\_class? + In the example from last section, the following operations would produce two virtual objects, and be completely removed from the optimized trace: @@ -511,48 +557,41 @@ values that the virtual object has. This means that instead of the jump, the following operations are emitted: -\begin{verbatim} -p15 = new(BoxedInteger) -setfield_gc(p15, i14, intval) -p10 = new(BoxedInteger) -setfield_gc(p10, i9, intval) -jump(p15, p10) -\end{verbatim} +\texttt{ +\begin{tabular}{l} +$p_{15}$ = new(BoxedInteger) \\ +setfield\_gc($p_{15}$, $i_{14}$, intval) \\ +$p_{10}$ = new(BoxedInteger) \\ +setfield\_gc($p_{10}$, $i_{9}$, intval) \\ +jump($p_{15}$, $p_{10}$) \\ +\end{tabular} +} -Note how the operations for creating these two instances has been moved down the +Note how the operations for creating these two instances have been moved down the trace. It looks like for these operations we actually didn't win much, because the objects are still allocated at the end. However, the optimization was still worthwhile even in this case, because some operations that have been performed on the forced virtual objects have been removed (some \texttt{getfield\_gc} operations and \texttt{guard\_class} operations). -The final optimized trace of the example looks like this: - -\begin{verbatim} -# arguments to the trace: p0, p1 -guard_class(p1, BoxedInteger) -i2 = getfield_gc(p1, intval) -guard_class(p0, BoxedInteger) -i3 = getfield_gc(p0, intval) -i4 = int_add(i2, i3) -i9 = int_add(i4, -100) +\begin{figure} +\includegraphics{figures/step1.pdf} +\label{fig:step1} +\caption{Resulting Trace After Allocation Removal} +\end{figure} -guard_class(p0, BoxedInteger) -i12 = getfield_gc(p0, intval) -i14 = int_add(i12, -1) - -i17 = int_gt(i14, 0) -guard_true(i17) -p15 = new(BoxedInteger) -setfield_gc(p15, i14, intval) -p10 = new(BoxedInteger) -setfield_gc(p10, i9, intval) -jump(p15, p10) -\end{verbatim} +The final optimized trace of the example can be seen in +Figure~\ref{fig:step1}. 
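The Algorithm subsection that follows is still only a stub ("XXX want some sort of pseudo-code"). Purely as an illustration of the pass described in the prose, a compact sketch is given below: every new is delayed and replaced by a shape summary, setfields update the summary, getfields are answered from it, and the summary is forced back into real operations only where the object escapes (for example at the jump). The operation encoding and helper names are invented for this sketch, nested virtuals and the resume data of guards are ignored, and this is not the actual PyPy optimizer code.

    def optimize(trace):
        # an operation is (result, opname, args); for setfield_gc the args are
        # (object, value, fieldname), for getfield_gc they are (object, fieldname)
        virtuals = {}     # result variable -> (class, {fieldname: value})
        aliases = {}      # getfield results that were answered from a virtual
        output = []

        def resolve(v):
            return aliases.get(v, v)

        def force(v):
            # materialize a delayed allocation just before it escapes
            cls, fields = virtuals.pop(v)
            output.append((v, 'new', [cls]))
            for name, value in fields.items():
                output.append((None, 'setfield_gc', [v, value, name]))

        for result, opname, args in trace:
            args = [resolve(a) for a in args]
            if opname == 'new':
                virtuals[result] = (args[0], {})        # delay the allocation
            elif opname == 'setfield_gc' and args[0] in virtuals:
                virtuals[args[0]][1][args[2]] = args[1]
            elif opname == 'getfield_gc' and args[0] in virtuals:
                aliases[result] = virtuals[args[0]][1][args[1]]
            elif opname == 'guard_class' and args[0] in virtuals:
                pass                                    # class is known statically
            else:
                for a in args:
                    if a in virtuals:
                        force(a)                        # the object escapes here
                output.append((result, opname, args))
        return output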
The optimized trace contains only two allocations, instead of the original five, and only three \texttt{guard\_class} operations, from the original seven. +\subsection{Algorithm} +\label{sub:Algorithm} + +XXX want some sort of pseudo-code + +% subsection Algorithm (end) %___________________________________________________________________________ @@ -564,6 +603,10 @@ of the type dispatching overhead. In the next section, we will explain how this optimization can be improved further. +XXX Category 2 The optimization of +Section~\ref{sec:virtuals} deals with them too: the \texttt{new} that creates them and +the field accesses are deferred, until the point where the object escapes. + % section Escape Analysis in a Tracing JIT (end) \section{Escape Analysis Across Loop Boundaries} @@ -598,10 +641,6 @@ The final trace was much better than the original one, because many allocations were removed from it. However, it also still contained allocations: -\begin{figure} -\includegraphics{figures/step1.pdf} -\end{figure} - The two new \texttt{BoxedIntegers} stored in \texttt{p15} and \texttt{p10} are passed into the next iteration of the loop. The next iteration will check that they are indeed \texttt{BoxedIntegers}, read their \texttt{intval} fields and then not use them From arigo at codespeak.net Mon Sep 27 14:57:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 14:57:06 +0200 (CEST) Subject: [pypy-svn] r77410 - in pypy/branch/minimark-jit/pypy: config jit/backend/x86/test Message-ID: <20100927125706.267ED282C06@codespeak.net> Author: arigo Date: Mon Sep 27 14:57:04 2010 New Revision: 77410 Modified: pypy/branch/minimark-jit/pypy/config/translationoption.py pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py Log: Switch from using hybrid to using the DEFL_GC, which is set in pypy.config.translationoption -- now to "minimark". 
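The effect of the change described in the log message above can be illustrated along the lines of the existing tests in pypy/config/test/test_pypyoption.py. The snippet below is only an illustration, not part of the commit, and assumes that get_pypy_config and set_opt_level are importable as they are used in those tests:

    from pypy.config.pypyoption import get_pypy_config
    from pypy.config.translationoption import set_opt_level, DEFL_GC

    conf = get_pypy_config()
    set_opt_level(conf, 'jit')
    # the 'jit' level now suggests the default GC ("minimark")
    # instead of hard-coding 'hybrid'
    assert conf.translation.gc == DEFL_GC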
Modified: pypy/branch/minimark-jit/pypy/config/translationoption.py ============================================================================== --- pypy/branch/minimark-jit/pypy/config/translationoption.py (original) +++ pypy/branch/minimark-jit/pypy/config/translationoption.py Mon Sep 27 14:57:04 2010 @@ -11,6 +11,8 @@ DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4 DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0 +DEFL_GC = "minimark" + IS_64_BITS = sys.maxint > 2147483647 PLATFORMS = [ @@ -105,7 +107,7 @@ # JIT generation: use -Ojit to enable it BoolOption("jit", "generate a JIT", default=False, - suggests=[("translation.gc", "hybrid"), + suggests=[("translation.gc", DEFL_GC), ("translation.gcrootfinder", "asmgcc"), ("translation.list_comprehension_operations", True)]), ChoiceOption("jit_backend", "choose the backend for the JIT", @@ -337,10 +339,10 @@ '0': 'boehm nobackendopt', '1': 'boehm lowinline', 'size': 'boehm lowinline remove_asserts', - 'mem': 'markcompact lowinline remove_asserts removetypeptr', - '2': 'hybrid extraopts', - '3': 'hybrid extraopts remove_asserts', - 'jit': 'hybrid extraopts jit', + 'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr', + '2': DEFL_GC + ' extraopts', + '3': DEFL_GC + ' extraopts remove_asserts', + 'jit': DEFL_GC + ' extraopts jit', } def final_check_config(config): Modified: pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_zrpy_gc.py Mon Sep 27 14:57:04 2010 @@ -18,6 +18,7 @@ from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.tool.udir import udir from pypy.jit.backend.x86.arch import IS_X86_64 +from pypy.config.translationoption import DEFL_GC import py.test class X(object): @@ -126,9 +127,8 @@ # ______________________________________________________________________ -class TestCompileHybrid(object): - # Test suite using the hybrid GC. We should ideally also have one - # using the minimark GC, but these tests take forever... +class TestCompileFramework(object): + # Test suite using (so far) the minimark GC. def setup_class(cls): funcs = [] name_to_func = {} @@ -177,13 +177,13 @@ OLD_DEBUG = GcLLDescr_framework.DEBUG try: GcLLDescr_framework.DEBUG = True - cls.cbuilder = compile(get_entry(allfuncs), "hybrid", + cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, gcrootfinder="asmgcc", jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG def run(self, name, n=2000): - pypylog = udir.join('TestCompileHybrid.log') + pypylog = udir.join('TestCompileFramework.log') res = self.cbuilder.cmdexec("%s %d" %(name, n), env={'PYPYLOG': ':%s' % pypylog}) assert int(res) == 20 @@ -191,7 +191,7 @@ def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) - def define_compile_hybrid_1(cls): + def define_compile_framework_1(cls): # a moving GC. Supports malloc_varsize_nonmovable. Simple test, works # without write_barriers and root stack enumeration. def f(n, x, *args): @@ -201,10 +201,10 @@ return (n, x) + args return None, f, None - def test_compile_hybrid_1(self): - self.run('compile_hybrid_1') + def test_compile_framework_1(self): + self.run('compile_framework_1') - def define_compile_hybrid_2(cls): + def define_compile_framework_2(cls): # More complex test, requires root stack enumeration but # not write_barriers. 
def f(n, x, *args): @@ -217,10 +217,10 @@ return (n, x) + args return None, f, None - def test_compile_hybrid_2(self): - self.run('compile_hybrid_2') + def test_compile_framework_2(self): + self.run('compile_framework_2') - def define_compile_hybrid_3(cls): + def define_compile_framework_3(cls): # Third version of the test. Really requires write_barriers. def f(n, x, *args): x.next = None @@ -243,13 +243,13 @@ - def test_compile_hybrid_3(self): + def test_compile_framework_3(self): x_test = X() x_test.foo = 5 - self.run_orig('compile_hybrid_3', 6, x_test) # check that it does not raise CheckError - self.run('compile_hybrid_3') + self.run_orig('compile_framework_3', 6, x_test) # check that it does not raise CheckError + self.run('compile_framework_3') - def define_compile_hybrid_3_extra(cls): + def define_compile_framework_3_extra(cls): # Extra version of the test, with tons of live vars around the residual # call that all contain a GC pointer. @dont_look_inside @@ -289,11 +289,11 @@ return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None return before, f, None - def test_compile_hybrid_3_extra(self): - self.run_orig('compile_hybrid_3_extra', 6, None) # check that it does not raise CheckError - self.run('compile_hybrid_3_extra') + def test_compile_framework_3_extra(self): + self.run_orig('compile_framework_3_extra', 6, None) # check that it does not raise CheckError + self.run('compile_framework_3_extra') - def define_compile_hybrid_4(cls): + def define_compile_framework_4(cls): # Fourth version of the test, with __del__. from pypy.rlib.debug import debug_print class Counter: @@ -313,10 +313,10 @@ return (n, x) + args return before, f, None - def test_compile_hybrid_4(self): - self.run('compile_hybrid_4') + def test_compile_framework_4(self): + self.run('compile_framework_4') - def define_compile_hybrid_5(cls): + def define_compile_framework_5(cls): # Test string manipulation. 
def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): n -= x.foo @@ -326,10 +326,10 @@ check(len(s) == 1*5 + 2*45 + 3*450 + 4*500) return None, f, after - def test_compile_hybrid_5(self): - self.run('compile_hybrid_5') + def test_compile_framework_5(self): + self.run('compile_framework_5') - def define_compile_hybrid_7(cls): + def define_compile_framework_7(cls): # Array of pointers (test the write barrier for setarrayitem_gc) def before(n, x): return n, x, None, None, None, None, None, None, None, None, [X(123)], None @@ -393,10 +393,10 @@ check(l[15].x == 142) return before, f, after - def test_compile_hybrid_7(self): - self.run('compile_hybrid_7') + def test_compile_framework_7(self): + self.run('compile_framework_7') - def define_compile_hybrid_external_exception_handling(cls): + def define_compile_framework_external_exception_handling(cls): def before(n, x): x = X(0) return n, x, None, None, None, None, None, None, None, None, None, None @@ -429,10 +429,10 @@ return before, f, None - def test_compile_hybrid_external_exception_handling(self): - self.run('compile_hybrid_external_exception_handling') + def test_compile_framework_external_exception_handling(self): + self.run('compile_framework_external_exception_handling') - def define_compile_hybrid_bug1(self): + def define_compile_framework_bug1(self): @purefunction def nonmoving(): x = X(1) @@ -455,10 +455,10 @@ return None, f, None - def test_compile_hybrid_bug1(self): - self.run('compile_hybrid_bug1', 200) + def test_compile_framework_bug1(self): + self.run('compile_framework_bug1', 200) - def define_compile_hybrid_vref(self): + def define_compile_framework_vref(self): from pypy.rlib.jit import virtual_ref, virtual_ref_finish class A: pass @@ -471,10 +471,10 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s return None, f, None - def test_compile_hybrid_vref(self): - self.run('compile_hybrid_vref', 200) + def test_compile_framework_vref(self): + self.run('compile_framework_vref', 200) - def define_compile_hybrid_float(self): + def define_compile_framework_float(self): # test for a bug: the fastpath_malloc does not save and restore # xmm registers around the actual call to the slow path class A: @@ -521,5 +521,5 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s return None, f, None - def test_compile_hybrid_float(self): - self.run('compile_hybrid_float') + def test_compile_framework_float(self): + self.run('compile_framework_float') Modified: pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py ============================================================================== --- pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/branch/minimark-jit/pypy/jit/backend/x86/test/test_ztranslation.py Mon Sep 27 14:57:04 2010 @@ -8,6 +8,7 @@ from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.translator.translator import TranslationContext from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 +from pypy.config.translationoption import DEFL_GC class TestTranslationX86(CCompiledMixin): CPUClass = getcpuclass() @@ -118,7 +119,7 @@ def _get_TranslationContext(self): t = TranslationContext() - t.config.translation.gc = 'minimark' + t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True From arigo at codespeak.net Mon Sep 27 15:16:36 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 15:16:36 
+0200 (CEST) Subject: [pypy-svn] r77411 - in pypy/branch/minimark-jit/pypy/rpython/memory/gc: . test Message-ID: <20100927131636.D51A6282C06@codespeak.net> Author: arigo Date: Mon Sep 27 15:16:35 2010 New Revision: 77411 Modified: pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/minimark-jit/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Don't use 4 as a shift in these tests; use WORD. At least things are still word-aligned. Modified: pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/minimark-jit/pypy/rpython/memory/gc/minimarkpage.py Mon Sep 27 15:16:35 2010 @@ -336,7 +336,7 @@ def _start_of_page_untranslated(addr, page_size): assert isinstance(addr, llarena.fakearenaaddress) - shift = 4 # for testing, we assume that the whole arena is not + shift = WORD # for testing, we assume that the whole arena is not # on a page boundary ofs = ((addr.offset - shift) // page_size) * page_size + shift return llarena.fakearenaaddress(addr.arena, ofs) Modified: pypy/branch/minimark-jit/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/minimark-jit/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/minimark-jit/pypy/rpython/memory/gc/test/test_minimarkpage.py Mon Sep 27 15:16:35 2010 @@ -7,22 +7,22 @@ from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr NULL = llmemory.NULL -SHIFT = 4 +SHIFT = WORD hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) def test_allocate_arena(): - ac = ArenaCollection(SHIFT + 8*20, 8, 1) + ac = ArenaCollection(SHIFT + 16*20, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1") + ac.uninitialized_pages + 16*20 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1") # - ac = ArenaCollection(SHIFT + 8*20 + 7, 8, 1) + ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 + 7 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 8") + ac.uninitialized_pages + 16*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16") def test_allocate_new_page(): From arigo at codespeak.net Mon Sep 27 15:59:06 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 15:59:06 +0200 (CEST) Subject: [pypy-svn] r77412 - pypy/extradoc/talk/pepm2011 Message-ID: <20100927135906.D7D58282C06@codespeak.net> Author: arigo Date: Mon Sep 27 15:59:05 2010 New Revision: 77412 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: Tweaks. Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Mon Sep 27 15:59:05 2010 @@ -143,7 +143,8 @@ and type dispatching. To understand the problem more closely, we analyze the occurring object lifetimes in Section~\ref{sec:lifetimes}. 
The most important technique to achieve this is a form of escape analysis \cite{XXX} that we call -\emph{virtual objects}, which is described in Section~\ref{sec:virtuals}. The +\emph{virtual objects},\footnote{The terminology comes from \cite{Psyco}} +which is described in Section~\ref{sec:virtuals}. The goal of virtual objects is to remove allocations of temporary objects that have a predictable lifetime and to optimize type dispatching in the process. @@ -313,6 +314,11 @@ To understand the problems more directly, let us consider a simple function that uses the object model: +XXX this is not an RPython interpreter; put a reference to the previous +paper to show how we deal with an interpreted piece of code and remove +the interpretation overhead, turning it into basically something +equivalent to the example here, which is the start of the present paper. + \begin{verbatim} def f(y): res = BoxedInteger(0) @@ -395,7 +401,7 @@ If the function is executed using the tracing JIT, with \texttt{y} being a \texttt{BoxedInteger}, the produced trace looks like -Figure~\ref{fig:unopt-trace}. The operations in the trace are indented to +Figure~\ref{fig:unopt-trace}. The operations in the trace are shown indented to correspond to the stack level of the function that contains the traced operation. The trace also shows the inefficiencies of \texttt{f} clearly, if one looks at the number of \texttt{new}, \texttt{set/getfield\_gc} and @@ -445,7 +451,8 @@ create a new instance of some class. These instances are used for a while, e.g. by calling methods on them (which are inlined into the trace), reading and writing their fields. Some of these instances \emph{escape}, which means that -they are stored in some globally accessible place or are passed into a function. +they are stored in some globally accessible place or are passed into a +non-inlined function via a residual call. Together with the \texttt{new} operations, the figure shows the lifetimes of the created objects. The objects that are created within a trace using \texttt{new} @@ -462,7 +469,7 @@ \item Category 4: Objects that live for a while, survive across the jump, and then escape. To these we also count the objects that live across several - jumps and then either escape or stop being used\footnote{In theory, the + jumps and then either escape or stop being used.\footnote{In theory, the approach of Section~\ref{sec:XXX} works also for objects that live for exactly $n>1$ iterations and then don't escape, but we expect this to be a very rare case, so we do not handle it.} @@ -483,10 +490,9 @@ \subsection{Virtual Objects} The main insight to improve the code shown in the last section is that objects -in category 1 don't survive very long and are collected by the garbage collector -soon after their allocation. Moreover, they are used only inside the loop and +in category 1 don't survive very long -- they are used only inside the loop and nobody else in the program stores a reference to them. The idea for improving -the code is thus to analyze which objects fall in category 1 and which may thus +the code is thus to analyze which objects fall in category 1 and may thus not be allocated at all. XXX is "symbolic execution" the right word to drop? @@ -494,7 +500,11 @@ This process is called \emph{escape analysis}. The escape analysis of our tracing JIT works by using \emph{virtual objects}: The trace is walked from beginning to end and whenever a \texttt{new} operation is seen, the operation is -removed and a virtual object is constructed. 
The virtual object summarizes the +removed and a virtual object\footnote{XXX what I have in mind when I talk +of ``virtual object'' is the run-time behavior -- i.e. a real object that +would exist at run-time, except that it has be virtual-ized. Here you seem +to mean rather ``virtual object description'' or something.} +is constructed. The virtual object summarizes the shape of the object that is allocated at this position in the original trace, and is used by the optimization to improve the trace. The shapes describe where the values that would be stored in the fields of the allocated objects @@ -550,6 +560,8 @@ is stored in a globally accessible place, the object needs to actually be allocated, as it will live longer than one iteration of the loop. +XXX ``the trace above'' is dangerous; should mention all figures by numbers + This is what happens at the end of the trace above, when the \texttt{jump} operation is hit. The arguments of the jump are at this point virtual objects. Before the jump is emitted, they are \emph{forced}. This means that the optimizers produces code @@ -557,6 +569,8 @@ values that the virtual object has. This means that instead of the jump, the following operations are emitted: +XXX should the variables be written in $math-style$ everywhere? + \texttt{ \begin{tabular}{l} $p_{15}$ = new(BoxedInteger) \\ @@ -666,12 +680,16 @@ \includegraphics{figures/step2.pdf} \end{figure} +XXX the figure is moved elsewhere by latex + Now the lifetime of the remaining allocations no longer crosses the jump, and we can run our escape analysis a second time, to get the following trace: \begin{figure} \includegraphics{figures/step3.pdf} \end{figure} +XXX the figure is moved elsewhere by latex + This result is now really good. The code performs the same operations than the original code, but using direct CPU arithmetic and no boxing, as opposed to the original version which used dynamic dispatching and boxing. @@ -683,7 +701,7 @@ all any more, but it still has the same behaviour. If the original loop had used \texttt{BoxedFloats}, the final loop would use \texttt{float\_*} operations everywhere instead (or even be very different, if the object model had -user-defined classes). +more different classes). 
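Figures step2 and step3 are included only as PDFs, so they are not visible in this patch. Pieced together from the operation listings earlier in the paper, the fully optimized loop body described here reduces to roughly the following; the variable numbering is kept from the earlier listing and is purely illustrative:

    # loop arguments: i2 and i3, the intval fields of the two loop variables;
    # no boxes and no guard_class operations are left
    i4  = int_add(i2, i3)
    i9  = int_add(i4, -100)
    i14 = int_add(i3, -1)
    i17 = int_gt(i14, 0)
    guard_true(i17)
    jump(i14, i9)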
%___________________________________________________________________________ @@ -713,6 +731,8 @@ \includegraphics{figures/step4.pdf} \end{figure} +XXX the figure is moved elsewhere by latex + XXX optimization particularly effective for chains of operations %___________________________________________________________________________ From antocuni at codespeak.net Mon Sep 27 16:16:35 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Mon, 27 Sep 2010 16:16:35 +0200 (CEST) Subject: [pypy-svn] r77413 - in pypy/branch/jitffi/pypy/jit/backend: llgraph llsupport llsupport/test test Message-ID: <20100927141635.2351B282C06@codespeak.net> Author: antocuni Date: Mon Sep 27 16:16:33 2010 New Revision: 77413 Added: pypy/branch/jitffi/pypy/jit/backend/llsupport/ffisupport.py (contents, props changed) pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_ffisupport.py (contents, props changed) Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py pypy/branch/jitffi/pypy/jit/backend/llsupport/descr.py pypy/branch/jitffi/pypy/jit/backend/llsupport/llmodel.py pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Log: add the possibility to dynamically build a calldescr for a function given its ffi types Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py Mon Sep 27 16:16:33 2010 @@ -297,6 +297,18 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None): + from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind + arg_types = [] + for arg in ffi_args: + kind = get_ffi_type_kind(arg) + if kind != history.VOID: + arg_types.append(kind) + reskind = get_ffi_type_kind(ffi_result) + return self.getdescr(0, reskind, extrainfo=extrainfo, + arg_types=''.join(arg_types)) + + def grab_exc_value(self): return llimpl.grab_exc_value() Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/descr.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llsupport/descr.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/descr.py Mon Sep 27 16:16:33 2010 @@ -307,6 +307,21 @@ _return_type = history.INT call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) +class DynamicIntCallDescr(BaseIntCallDescr): + """ + calldescr that works for every integer type, by explicitly passing it the + size of the result. 
Used only by get_call_descr_dynamic + """ + _clsname = 'DynamicIntCallDescr' + + def __init__(self, arg_classes, result_size, extrainfo=None): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + self._result_size = result_size + + def get_result_size(self, translate_support_code): + return self._result_size + + class NonGcPtrCallDescr(BaseIntCallDescr): _clsname = 'NonGcPtrCallDescr' def get_result_size(self, translate_support_code): Added: pypy/branch/jitffi/pypy/jit/backend/llsupport/ffisupport.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/ffisupport.py Mon Sep 27 16:16:33 2010 @@ -0,0 +1,63 @@ +from pypy.jit.metainterp import history +from pypy.jit.backend.llsupport.descr import DynamicIntCallDescr, NonGcPtrCallDescr,\ + FloatCallDescr, VoidCallDescr + +def get_call_descr_dynamic(ffi_args, ffi_result, extrainfo=None): + """Get a call descr: the types of result and args are represented by + rlib.libffi.ffi_type_*""" + try: + reskind = get_ffi_type_kind(ffi_result) + argkinds = [get_ffi_type_kind(arg) for arg in ffi_args] + except KeyError: + return None # ?? + arg_classes = ''.join(argkinds) + if reskind == history.INT: + return DynamicIntCallDescr(arg_classes, ffi_result.c_size, extrainfo) + elif reskind == history.REF: + return NonGcPtrCallDescr(arg_classes, extrainfo) + elif reskind == history.FLOAT: + return FloatCallDescr(arg_classes, extrainfo) + elif reskind == history.VOID: + return VoidCallDescr(arg_classes, extrainfo) + assert False + + +# XXX: maybe we can turn this into a dictionary, but we need to do it at +# runtime as libffi.ffi_type_* are pointers +def get_ffi_type_kind(ffi_type): + from pypy.rlib import libffi + if ffi_type is libffi.ffi_type_void: + return history.VOID + elif ffi_type is libffi.ffi_type_pointer: + return history.REF + elif ffi_type is libffi.ffi_type_double: + return history.FLOAT + elif ffi_type is libffi.ffi_type_uchar: + return history.INT + elif ffi_type is libffi.ffi_type_uint8: + return history.INT + elif ffi_type is libffi.ffi_type_schar: + return history.INT + elif ffi_type is libffi.ffi_type_sint8: + return history.INT + elif ffi_type is libffi.ffi_type_uint16: + return history.INT + elif ffi_type is libffi.ffi_type_ushort: + return history.INT + elif ffi_type is libffi.ffi_type_sint16: + return history.INT + elif ffi_type is libffi.ffi_type_sshort: + return history.INT + elif ffi_type is libffi.ffi_type_uint: + return history.INT + elif ffi_type is libffi.ffi_type_uint32: + return history.INT + elif ffi_type is libffi.ffi_type_sint: + return history.INT + elif ffi_type is libffi.ffi_type_sint32: + return history.INT + ## elif ffi_type is libffi.ffi_type_uint64: + ## return history.INT + ## elif ffi_type is libffi.ffi_type_sint64: + ## return history.INT + raise KeyError Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/llmodel.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llsupport/llmodel.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/llmodel.py Mon Sep 27 16:16:33 2010 @@ -17,6 +17,7 @@ from pypy.jit.backend.llsupport.descr import get_call_descr from pypy.jit.backend.llsupport.descr import BaseIntCallDescr, GcPtrCallDescr from pypy.jit.backend.llsupport.descr import FloatCallDescr, VoidCallDescr +from pypy.jit.backend.llsupport.ffisupport import get_call_descr_dynamic from pypy.rpython.annlowlevel import cast_instance_to_base_ptr @@ 
-231,6 +232,9 @@ def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None): + return get_call_descr_dynamic(ffi_args, ffi_result, extrainfo) + def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) ovf_inst = lltype.cast_opaque_ptr(llmemory.GCREF, Added: pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_ffisupport.py ============================================================================== --- (empty file) +++ pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_ffisupport.py Mon Sep 27 16:16:33 2010 @@ -0,0 +1,18 @@ +from pypy.rlib import libffi +from pypy.jit.backend.llsupport.ffisupport import get_call_descr_dynamic, \ + VoidCallDescr, DynamicIntCallDescr + +def test_call_descr_dynamic(): + + args = [libffi.ffi_type_sint, libffi.ffi_type_double, libffi.ffi_type_pointer] + descr = get_call_descr_dynamic(args, libffi.ffi_type_void) + assert isinstance(descr, VoidCallDescr) + assert descr.arg_classes == 'ifr' + + descr = get_call_descr_dynamic([], libffi.ffi_type_sint8) + assert isinstance(descr, DynamicIntCallDescr) + assert descr.get_result_size(False) == 1 + + descr = get_call_descr_dynamic([], libffi.ffi_type_float) + assert descr is None # single floats are not supported so far + Modified: pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Mon Sep 27 16:16:33 2010 @@ -421,6 +421,7 @@ assert x == 3.5 - 42 def test_call(self): + from pypy.rlib.libffi import ffi_type_sint, ffi_type_uchar, ffi_type_sint16 def func_int(a, b): return a + b @@ -428,23 +429,31 @@ return chr(ord(c) + ord(c1)) functions = [ - (func_int, lltype.Signed, 655360), - (func_int, rffi.SHORT, 1213), - (func_char, lltype.Char, 12) + (func_int, lltype.Signed, ffi_type_sint, 655360), + (func_int, rffi.SHORT, ffi_type_sint16, 1213), + (func_char, lltype.Char, ffi_type_uchar, 12) ] - for func, TP, num in functions: + for func, TP, ffi_type, num in functions: cpu = self.cpu # FPTR = self.Ptr(self.FuncType([TP, TP], TP)) func_ptr = llhelper(FPTR, func) FUNC = deref(FPTR) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) funcbox = self.get_funcbox(cpu, func_ptr) + # first, try it with the "normal" calldescr + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=calldescr) assert res.value == 2 * num + # then, try it with the dynamic calldescr + dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type) + res = self.execute_operation(rop.CALL, + [funcbox, BoxInt(num), BoxInt(num)], + 'int', descr=dyn_calldescr) + assert res.value == 2 * num + if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, i1, f7, f8, f9): From arigo at codespeak.net Mon Sep 27 16:21:17 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 16:21:17 +0200 (CEST) Subject: [pypy-svn] r77414 - pypy/extradoc/talk/pepm2011 Message-ID: <20100927142117.DEF6E282C06@codespeak.net> Author: arigo Date: Mon Sep 27 16:21:16 2010 New Revision: 77414 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: More tweaks. 
Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Mon Sep 27 16:21:16 2010 @@ -97,7 +97,7 @@ XXX need to re-target introduction a bit to fit PEPMs focus -The goal of a just-in-time compiler for a dynamic language is obviously to +The goal of a just-in-time (JIT) compiler for a dynamic language is obviously to improve the speed of the language over an implementation of the language that uses interpretation. The first goal of a JIT is thus to remove the interpretation overhead, i.e. the overhead of bytecode (or AST) dispatch and the @@ -131,7 +131,8 @@ implementing dynamic programming languages. PyPy's approach to doing so is to straightforwardly implement an interpreter for the to-be-implemented language, and then use powerful tools to turn the interpreter -into an efficient VM that also contains a just-in-time compiler. This compiler +into an efficient virtual machine (VM) that also contains a just-in-time compiler. +This compiler is automatically generated from the interpreter using partial-evaluation-like techniques \cite{bolz_tracing_2009}. The PyPy project and its approach to tracing JIT compilers is described in Section~\ref{sec:Background}. @@ -171,10 +172,11 @@ The work described in this paper was done in the context of the PyPy project \cite{armin_rigo_pypys_2006}. PyPy is an environment where dynamic languages can be implemented in a simple yet efficient way. The approach taken when -implementing a language with PyPy is to write an interpreter for the language in +implementing a language with PyPy is to write an \emph{interpreter} +for the language in \emph{RPython} \cite{davide_ancona_rpython:_2007}. RPython ("restricted Python") -is a subset of Python chosen in such a way, that type inference becomes -possible. The language interpreter can thus be translated with the help of +is a subset of Python chosen in such a way that type inference becomes +possible. The language interpreter can then be compiled (``translated'') with PyPy's tools into a VM on the C level. Because the interpreter is written at a relatively high level, the language implementation is kept free of low-level details, such as object layout, garbage collection or memory model. Those @@ -184,22 +186,22 @@ The feature that makes PyPy more than a compiler with a runtime system is it's support for automated JIT compiler generation \cite{bolz_tracing_2009}. During the translation to C, PyPy's tools can generate a just-in-time compiler for the -language that the interpreter is implementing. This process is not fully -automatic, but needs to be guided by the language implementer by some -source-code hints. Semi-automatically generating a JIT compiler has many advantages -over writing one manually, which is an error-prone and tedious process. The -generated JIT has the same semantics as the interpreter by construction, and all -languages implemented using PyPy benefit from improvements to the JIT generator. +language that the interpreter is implementing. This process is mostly +automatic; it only needs to be guided by the language implementer by a small number of +source-code hints. Mostly-automatically generating a JIT compiler has many advantages +over writing one manually, which is an error-prone and tedious process. 
+By construction, the generated JIT has the same semantics as the interpreter, and +the process benefits all languages implemented as an interpreter in RPython. The JIT that is produced by PyPy's JIT generator is a \emph{tracing JIT -compiler}, a concept which will be explained in more detail in the next section. +compiler}, a concept which we now explain in more details. \subsection{Tracing JIT Compilers} \label{sub:JIT_background} -Tracing JITs are a recently more popular approach to write just-in-time +Tracing JITs are a recently popular approach to write just-in-time compilers for dynamic languages \cite{XXX}. Their origins lie in the Dynamo -project, that used a tracing approach to optimize machine code using execution -traces \cite{XXX}. They were then adapted to be used for a very light-weight +project, which used a tracing approach to optimize machine code using execution +traces \cite{XXX}. Tracing JITs have then be adapted to be used for a very light-weight Java VM \cite{XXX} and afterwards used in several implementations of dynamic languages, such as JavaScript \cite{XXX}, Lua \cite{XXX} and now Python via PyPy. @@ -434,7 +436,9 @@ tracing JIT compiler. \begin{figure} +\begin{center} \includegraphics[scale=0.7]{figures/obj-lifetime.pdf} +\end{center} \caption{Object Lifetimes in a Trace} \label{fig:lifetimes} From arigo at codespeak.net Mon Sep 27 16:27:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 16:27:52 +0200 (CEST) Subject: [pypy-svn] r77415 - pypy/branch/minimark-jit/pypy/rpython/memory/test Message-ID: <20100927142752.61A47282C06@codespeak.net> Author: arigo Date: Mon Sep 27 16:27:51 2010 New Revision: 77415 Modified: pypy/branch/minimark-jit/pypy/rpython/memory/test/test_gc.py Log: Fix for 64-bits. Modified: pypy/branch/minimark-jit/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/branch/minimark-jit/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/minimark-jit/pypy/rpython/memory/test/test_gc.py Mon Sep 27 16:27:51 2010 @@ -29,7 +29,7 @@ GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False - BUT_HOW_BIG_IS_A_BIG_STRING = 12 + BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD def setup_class(cls): cls._saved_logstate = py.log._getstate() From arigo at codespeak.net Mon Sep 27 16:29:23 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 16:29:23 +0200 (CEST) Subject: [pypy-svn] r77416 - pypy/branch/minimark-jit/pypy/config/test Message-ID: <20100927142923.A26AA282C06@codespeak.net> Author: arigo Date: Mon Sep 27 16:29:22 2010 New Revision: 77416 Modified: pypy/branch/minimark-jit/pypy/config/test/test_pypyoption.py Log: Fix. 
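The "Fix for 64-bits" in r77415 above is easier to see with the numbers written out. This is illustrative arithmetic only, not committed code: the old literal happened to equal three words only on 32-bit platforms, while the new expression scales with the platform word size.

    # BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD
    WORD = 4                  # 32-bit build
    assert 3 * WORD == 12     # the value that used to be hard-coded
    WORD = 8                  # 64-bit build
    assert 3 * WORD == 24     # the threshold now follows the word size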
Modified: pypy/branch/minimark-jit/pypy/config/test/test_pypyoption.py ============================================================================== --- pypy/branch/minimark-jit/pypy/config/test/test_pypyoption.py (original) +++ pypy/branch/minimark-jit/pypy/config/test/test_pypyoption.py Mon Sep 27 16:29:22 2010 @@ -41,7 +41,7 @@ assert not conf.translation.backendopt.none conf = get_pypy_config() set_opt_level(conf, 'mem') - assert conf.translation.gc == 'markcompact' + assert conf.translation.gcremovetypeptr assert not conf.translation.backendopt.none def test_set_pypy_opt_level(): From arigo at codespeak.net Mon Sep 27 17:35:47 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 17:35:47 +0200 (CEST) Subject: [pypy-svn] r77418 - pypy/extradoc/planning Message-ID: <20100927153547.D3676282C06@codespeak.net> Author: arigo Date: Mon Sep 27 17:35:46 2010 New Revision: 77418 Modified: pypy/extradoc/planning/jit.txt Log: A minor x86_64 task. Modified: pypy/extradoc/planning/jit.txt ============================================================================== --- pypy/extradoc/planning/jit.txt (original) +++ pypy/extradoc/planning/jit.txt Mon Sep 27 17:35:46 2010 @@ -81,6 +81,10 @@ - guard_true(frame.is_being_profiled) all over the place +- at the end of every loop there is on x86-64: + int_and(i49, Const(0x8000000000f00000)) + which is not a constant that fits in 32-bits. Optimize that. + - xxx (find more examples :-) From arigo at codespeak.net Mon Sep 27 18:37:59 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 27 Sep 2010 18:37:59 +0200 (CEST) Subject: [pypy-svn] r77419 - in pypy/trunk/pypy: config config/test jit/backend/llsupport jit/backend/llsupport/test jit/backend/x86/test jit/metainterp rpython/memory/gc rpython/memory/gc/test rpython/memory/test Message-ID: <20100927163759.21C65282C06@codespeak.net> Author: arigo Date: Mon Sep 27 18:37:56 2010 New Revision: 77419 Modified: pypy/trunk/pypy/config/test/test_pypyoption.py pypy/trunk/pypy/config/translationoption.py pypy/trunk/pypy/jit/backend/llsupport/gc.py pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py pypy/trunk/pypy/jit/metainterp/gc.py pypy/trunk/pypy/rpython/memory/gc/generation.py pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py pypy/trunk/pypy/rpython/memory/test/test_gc.py Log: Merge branch/minimark-jit: do some last fixes, notably with the JIT, and enable the "minimark" GC by default. 
Modified: pypy/trunk/pypy/config/test/test_pypyoption.py ============================================================================== --- pypy/trunk/pypy/config/test/test_pypyoption.py (original) +++ pypy/trunk/pypy/config/test/test_pypyoption.py Mon Sep 27 18:37:56 2010 @@ -41,7 +41,7 @@ assert not conf.translation.backendopt.none conf = get_pypy_config() set_opt_level(conf, 'mem') - assert conf.translation.gc == 'markcompact' + assert conf.translation.gcremovetypeptr assert not conf.translation.backendopt.none def test_set_pypy_opt_level(): Modified: pypy/trunk/pypy/config/translationoption.py ============================================================================== --- pypy/trunk/pypy/config/translationoption.py (original) +++ pypy/trunk/pypy/config/translationoption.py Mon Sep 27 18:37:56 2010 @@ -11,6 +11,8 @@ DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4 DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0 +DEFL_GC = "minimark" + IS_64_BITS = sys.maxint > 2147483647 PLATFORMS = [ @@ -105,7 +107,7 @@ # JIT generation: use -Ojit to enable it BoolOption("jit", "generate a JIT", default=False, - suggests=[("translation.gc", "hybrid"), + suggests=[("translation.gc", DEFL_GC), ("translation.gcrootfinder", "asmgcc"), ("translation.list_comprehension_operations", True)]), ChoiceOption("jit_backend", "choose the backend for the JIT", @@ -337,10 +339,10 @@ '0': 'boehm nobackendopt', '1': 'boehm lowinline', 'size': 'boehm lowinline remove_asserts', - 'mem': 'markcompact lowinline remove_asserts removetypeptr', - '2': 'hybrid extraopts', - '3': 'hybrid extraopts remove_asserts', - 'jit': 'hybrid extraopts jit', + 'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr', + '2': DEFL_GC + ' extraopts', + '3': DEFL_GC + ' extraopts remove_asserts', + 'jit': DEFL_GC + ' extraopts jit', } def final_check_config(config): Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Mon Sep 27 18:37:56 2010 @@ -133,7 +133,7 @@ # ____________________________________________________________ -# All code below is for the hybrid GC +# All code below is for the hybrid or minimark GC class GcRefList: @@ -167,7 +167,7 @@ def alloc_gcref_list(self, n): # Important: the GRREF_LISTs allocated are *non-movable*. This - # requires support in the gc (only the hybrid GC supports it so far). + # requires support in the gc (hybrid GC or minimark GC so far). if we_are_translated(): list = rgc.malloc_nonmovable(self.GCREF_LIST, n) assert list, "malloc_nonmovable failed!" 
@@ -350,8 +350,9 @@ self.translator = translator self.llop1 = llop1 - # we need the hybrid GC for GcRefList.alloc_gcref_list() to work - if gcdescr.config.translation.gc != 'hybrid': + # we need the hybrid or minimark GC for GcRefList.alloc_gcref_list() + # to work + if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) @@ -382,8 +383,7 @@ self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) (self.array_basesize, _, self.array_length_ofs) = \ symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) - min_ns = self.GCClass.TRANSLATION_PARAMS['min_nursery_size'] - self.max_size_of_young_obj = self.GCClass.get_young_fixedsize(min_ns) + self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() # make a malloc function, with three arguments def malloc_basic(size, tid): Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Mon Sep 27 18:37:56 2010 @@ -149,11 +149,12 @@ class TestFramework: + gc = 'hybrid' def setup_method(self, meth): class config_: class translation: - gc = 'hybrid' + gc = self.gc gcrootfinder = 'asmgcc' gctransformer = 'framework' gcremovetypeptr = False @@ -387,3 +388,7 @@ assert operations[1].getarg(1) == v_index assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + + +class TestFrameworkMiniMark(TestFramework): + gc = 'minimark' Modified: pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_zrpy_gc.py Mon Sep 27 18:37:56 2010 @@ -18,6 +18,7 @@ from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.tool.udir import udir from pypy.jit.backend.x86.arch import IS_X86_64 +from pypy.config.translationoption import DEFL_GC import py.test class X(object): @@ -126,7 +127,8 @@ # ______________________________________________________________________ -class TestCompileHybrid(object): +class TestCompileFramework(object): + # Test suite using (so far) the minimark GC. def setup_class(cls): funcs = [] name_to_func = {} @@ -175,13 +177,13 @@ OLD_DEBUG = GcLLDescr_framework.DEBUG try: GcLLDescr_framework.DEBUG = True - cls.cbuilder = compile(get_entry(allfuncs), "hybrid", + cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, gcrootfinder="asmgcc", jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG def run(self, name, n=2000): - pypylog = udir.join('TestCompileHybrid.log') + pypylog = udir.join('TestCompileFramework.log') res = self.cbuilder.cmdexec("%s %d" %(name, n), env={'PYPYLOG': ':%s' % pypylog}) assert int(res) == 20 @@ -189,7 +191,7 @@ def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) - def define_compile_hybrid_1(cls): + def define_compile_framework_1(cls): # a moving GC. Supports malloc_varsize_nonmovable. Simple test, works # without write_barriers and root stack enumeration. 
def f(n, x, *args): @@ -199,10 +201,10 @@ return (n, x) + args return None, f, None - def test_compile_hybrid_1(self): - self.run('compile_hybrid_1') + def test_compile_framework_1(self): + self.run('compile_framework_1') - def define_compile_hybrid_2(cls): + def define_compile_framework_2(cls): # More complex test, requires root stack enumeration but # not write_barriers. def f(n, x, *args): @@ -215,10 +217,10 @@ return (n, x) + args return None, f, None - def test_compile_hybrid_2(self): - self.run('compile_hybrid_2') + def test_compile_framework_2(self): + self.run('compile_framework_2') - def define_compile_hybrid_3(cls): + def define_compile_framework_3(cls): # Third version of the test. Really requires write_barriers. def f(n, x, *args): x.next = None @@ -241,13 +243,13 @@ - def test_compile_hybrid_3(self): + def test_compile_framework_3(self): x_test = X() x_test.foo = 5 - self.run_orig('compile_hybrid_3', 6, x_test) # check that it does not raise CheckError - self.run('compile_hybrid_3') + self.run_orig('compile_framework_3', 6, x_test) # check that it does not raise CheckError + self.run('compile_framework_3') - def define_compile_hybrid_3_extra(cls): + def define_compile_framework_3_extra(cls): # Extra version of the test, with tons of live vars around the residual # call that all contain a GC pointer. @dont_look_inside @@ -287,11 +289,11 @@ return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None return before, f, None - def test_compile_hybrid_3_extra(self): - self.run_orig('compile_hybrid_3_extra', 6, None) # check that it does not raise CheckError - self.run('compile_hybrid_3_extra') + def test_compile_framework_3_extra(self): + self.run_orig('compile_framework_3_extra', 6, None) # check that it does not raise CheckError + self.run('compile_framework_3_extra') - def define_compile_hybrid_4(cls): + def define_compile_framework_4(cls): # Fourth version of the test, with __del__. from pypy.rlib.debug import debug_print class Counter: @@ -311,10 +313,10 @@ return (n, x) + args return before, f, None - def test_compile_hybrid_4(self): - self.run('compile_hybrid_4') + def test_compile_framework_4(self): + self.run('compile_framework_4') - def define_compile_hybrid_5(cls): + def define_compile_framework_5(cls): # Test string manipulation. 
def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): n -= x.foo @@ -324,10 +326,10 @@ check(len(s) == 1*5 + 2*45 + 3*450 + 4*500) return None, f, after - def test_compile_hybrid_5(self): - self.run('compile_hybrid_5') + def test_compile_framework_5(self): + self.run('compile_framework_5') - def define_compile_hybrid_7(cls): + def define_compile_framework_7(cls): # Array of pointers (test the write barrier for setarrayitem_gc) def before(n, x): return n, x, None, None, None, None, None, None, None, None, [X(123)], None @@ -391,10 +393,10 @@ check(l[15].x == 142) return before, f, after - def test_compile_hybrid_7(self): - self.run('compile_hybrid_7') + def test_compile_framework_7(self): + self.run('compile_framework_7') - def define_compile_hybrid_external_exception_handling(cls): + def define_compile_framework_external_exception_handling(cls): def before(n, x): x = X(0) return n, x, None, None, None, None, None, None, None, None, None, None @@ -427,10 +429,10 @@ return before, f, None - def test_compile_hybrid_external_exception_handling(self): - self.run('compile_hybrid_external_exception_handling') + def test_compile_framework_external_exception_handling(self): + self.run('compile_framework_external_exception_handling') - def define_compile_hybrid_bug1(self): + def define_compile_framework_bug1(self): @purefunction def nonmoving(): x = X(1) @@ -453,10 +455,10 @@ return None, f, None - def test_compile_hybrid_bug1(self): - self.run('compile_hybrid_bug1', 200) + def test_compile_framework_bug1(self): + self.run('compile_framework_bug1', 200) - def define_compile_hybrid_vref(self): + def define_compile_framework_vref(self): from pypy.rlib.jit import virtual_ref, virtual_ref_finish class A: pass @@ -469,10 +471,10 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s return None, f, None - def test_compile_hybrid_vref(self): - self.run('compile_hybrid_vref', 200) + def test_compile_framework_vref(self): + self.run('compile_framework_vref', 200) - def define_compile_hybrid_float(self): + def define_compile_framework_float(self): # test for a bug: the fastpath_malloc does not save and restore # xmm registers around the actual call to the slow path class A: @@ -519,5 +521,5 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s return None, f, None - def test_compile_hybrid_float(self): - self.run('compile_hybrid_float') + def test_compile_framework_float(self): + self.run('compile_framework_float') Modified: pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py (original) +++ pypy/trunk/pypy/jit/backend/x86/test/test_ztranslation.py Mon Sep 27 18:37:56 2010 @@ -8,6 +8,7 @@ from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.translator.translator import TranslationContext from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 +from pypy.config.translationoption import DEFL_GC class TestTranslationX86(CCompiledMixin): CPUClass = getcpuclass() @@ -118,7 +119,7 @@ def _get_TranslationContext(self): t = TranslationContext() - t.config.translation.gc = 'hybrid' + t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True Modified: pypy/trunk/pypy/jit/metainterp/gc.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/gc.py (original) +++ 
pypy/trunk/pypy/jit/metainterp/gc.py Mon Sep 27 18:37:56 2010 @@ -19,6 +19,9 @@ class GC_hybrid(GcDescription): malloc_zero_filled = True +class GC_minimark(GcDescription): + malloc_zero_filled = True + def get_description(config): name = config.translation.gc Modified: pypy/trunk/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/generation.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/generation.py Mon Sep 27 18:37:56 2010 @@ -147,6 +147,11 @@ def get_young_var_basesize(nursery_size): return nursery_size // 4 - 1 + @classmethod + def JIT_max_size_of_young_obj(cls): + min_nurs_size = cls.TRANSLATION_PARAMS['min_nursery_size'] + return cls.get_young_fixedsize(min_nurs_size) + def is_in_nursery(self, addr): ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, "odd-valued (i.e. tagged) pointer unexpected here") Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Mon Sep 27 18:37:56 2010 @@ -719,6 +719,10 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS + @classmethod + def JIT_max_size_of_young_obj(cls): + return cls.TRANSLATION_PARAMS['large_object'] + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: self.remember_young_pointer(addr_struct) Modified: pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Mon Sep 27 18:37:56 2010 @@ -336,7 +336,7 @@ def _start_of_page_untranslated(addr, page_size): assert isinstance(addr, llarena.fakearenaaddress) - shift = 4 # for testing, we assume that the whole arena is not + shift = WORD # for testing, we assume that the whole arena is not # on a page boundary ofs = ((addr.offset - shift) // page_size) * page_size + shift return llarena.fakearenaaddress(addr.arena, ofs) Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py Mon Sep 27 18:37:56 2010 @@ -7,22 +7,22 @@ from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr NULL = llmemory.NULL -SHIFT = 4 +SHIFT = WORD hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) def test_allocate_arena(): - ac = ArenaCollection(SHIFT + 8*20, 8, 1) + ac = ArenaCollection(SHIFT + 16*20, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1") + ac.uninitialized_pages + 16*20 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1") # - ac = ArenaCollection(SHIFT + 8*20 + 7, 8, 1) + ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 + 7 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 8") + ac.uninitialized_pages + 16*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, 
"ac.uninitialized_pages + 16*20 + 16") def test_allocate_new_page(): Modified: pypy/trunk/pypy/rpython/memory/test/test_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_gc.py Mon Sep 27 18:37:56 2010 @@ -29,7 +29,7 @@ GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False - BUT_HOW_BIG_IS_A_BIG_STRING = 12 + BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD def setup_class(cls): cls._saved_logstate = py.log._getstate() From afa at codespeak.net Tue Sep 28 00:14:02 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 00:14:02 +0200 (CEST) Subject: [pypy-svn] r77420 - in pypy/branch/fast-forward/pypy/rlib: . test Message-ID: <20100927221402.79812282C06@codespeak.net> Author: afa Date: Tue Sep 28 00:14:00 2010 New Revision: 77420 Modified: pypy/branch/fast-forward/pypy/rlib/runicode.py pypy/branch/fast-forward/pypy/rlib/test/test_runicode.py Log: implement utf-32 encoders and decoders Modified: pypy/branch/fast-forward/pypy/rlib/runicode.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/runicode.py (original) +++ pypy/branch/fast-forward/pypy/rlib/runicode.py Tue Sep 28 00:14:00 2010 @@ -270,7 +270,6 @@ if errorhandler is None: errorhandler = raise_unicode_exception_decode bo = 0 - consumed = 0 if BYTEORDER == 'little': ihi = 1 @@ -419,6 +418,162 @@ # ____________________________________________________________ +# utf-32 + +def str_decode_utf_32(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "native") + return result, length + +def str_decode_utf_32_be(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "big") + return result, length + +def str_decode_utf_32_le(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "little") + return result, length + +def str_decode_utf_32_helper(s, size, errors, final=True, + errorhandler=None, + byteorder="native"): + if errorhandler is None: + errorhandler = raise_unicode_exception_decode + bo = 0 + + if BYTEORDER == 'little': + iorder = [0, 1, 2, 3] + else: + iorder = [3, 2, 1, 0] + + # Check for BOM marks (U+FEFF) in the input and adjust current + # byte order setting accordingly. In native mode, the leading BOM + # mark is skipped, in all other modes, it is copied to the output + # stream as-is (giving a ZWNBSP character). + pos = 0 + if byteorder == 'native': + if size >= 4: + bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | + (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) + if BYTEORDER == 'little': + if bom == 0x0000FEFF: + pos += 4 + bo = -1 + elif bom == 0xFFFE0000: + pos += 4 + bo = 1 + else: + if bom == 0x0000FEFF: + pos += 2 + bo = 1 + elif bom == 0xFFFE0000: + pos += 2 + bo = -1 + elif byteorder == 'little': + bo = -1 + else: + bo = 1 + if size == 0: + return u'', 0, bo + if bo == -1: + # force little endian + iorder = [0, 1, 2, 3] + + elif bo == 1: + # force big endian + iorder = [3, 2, 1, 0] + + result = UnicodeBuilder(size // 4) + + while pos < size: + # remaining bytes at the end? 
(size should be divisible by 4) + if len(s) - pos < 4: + if not final: + break + r, pos = errorhandler(errors, 'utf-32', "truncated data", + s, pos, len(s)) + result.append(r) + if len(s) - pos < 4: + break + continue + ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | + (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) + if ch >= 0x110000: + r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", + s, pos, len(s)) + result.append(r) + continue + + if MAXUNICODE < 65536 and ch >= 0x10000: + ch -= 0x10000L + result.append(unichr(0xD800 + (ch >> 10))) + result.append(unichr(0xDC00 + (ch & 0x03FF))) + else: + result.append(UNICHR(ch)) + pos += 4 + return result.build(), pos, bo + +def _STORECHAR32(result, CH, byteorder): + c0 = chr(((CH) >> 24) & 0xff) + c1 = chr(((CH) >> 16) & 0xff) + c2 = chr(((CH) >> 8) & 0xff) + c3 = chr((CH) & 0xff) + if byteorder == 'little': + result.append(c3) + result.append(c2) + result.append(c1) + result.append(c0) + else: + result.append(c0) + result.append(c1) + result.append(c2) + result.append(c3) + +def unicode_encode_utf_32_helper(s, size, errors, + errorhandler=None, + byteorder='little'): + if size == 0: + return "" + + result = StringBuilder(size * 4 + 4) + if byteorder == 'native': + _STORECHAR32(result, 0xFEFF, BYTEORDER) + byteorder = BYTEORDER + + i = 0 + while i < size: + ch = ord(s[i]) + i += 1 + ch2 = 0 + if MAXUNICODE < 65536 and 0xD800 <= ch <= 0xDBFF and i < size: + ch2 = ord(s[i]) + if 0xDC00 <= ch2 <= 0xDFFF: + ch = (((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000; + i += 1 + _STORECHAR32(result, ch, byteorder) + + return result.build() + +def unicode_encode_utf_32(s, size, errors, + errorhandler=None): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, "native") + + +def unicode_encode_utf_32_be(s, size, errors, + errorhandler=None): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, "big") + + +def unicode_encode_utf_32_le(s, size, errors, + errorhandler=None): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, "little") + + +# ____________________________________________________________ # utf-7 ## indicate whether a UTF-7 character is special i.e. 
cannot be directly Modified: pypy/branch/fast-forward/pypy/rlib/test/test_runicode.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_runicode.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_runicode.py Tue Sep 28 00:14:00 2010 @@ -76,7 +76,7 @@ assert start == startingpos assert stop == endingpos return u"42424242", stop - return "", endingpos + return u"", endingpos decoder = self.getdecoder(encoding) if addstuff: s += "some rest in ascii" @@ -99,12 +99,14 @@ def test_all_first_256(self): for i in range(256): - for encoding in "utf-8 latin-1 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 latin-1 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(unichr(i), encoding) def test_first_10000(self): for i in range(10000): - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(unichr(i), encoding) def test_random(self): @@ -113,13 +115,15 @@ if 0xd800 <= v <= 0xdfff: continue uni = unichr(v) - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): - self.checkdecode(uni, encoding) + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): + self.checkdecode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): - self.checkdecode(uni, encoding) + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): + self.checkdecode(uni, encoding) def test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: @@ -179,12 +183,14 @@ def test_all_first_256(self): for i in range(256): - for encoding in "utf-8 latin-1 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkencode(unichr(i), encoding) def test_first_10000(self): for i in range(10000): - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkencode(unichr(i), encoding) def test_random(self): @@ -193,12 +199,14 @@ if 0xd800 <= v <= 0xdfff: continue uni = unichr(v) - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) - for encoding in "utf-8 utf-16 utf-16-be utf-16-le".split(): + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): self.checkencode(uni, encoding) def test_single_chars_utf8(self): From afa at codespeak.net Tue Sep 28 00:18:07 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 00:18:07 +0200 (CEST) Subject: [pypy-svn] r77421 - in pypy/branch/fast-forward/pypy/module/_codecs: . 
test Message-ID: <20100927221807.C9EEB282C06@codespeak.net> Author: afa Date: Tue Sep 28 00:18:06 2010 New Revision: 77421 Modified: pypy/branch/fast-forward/pypy/module/_codecs/__init__.py pypy/branch/fast-forward/pypy/module/_codecs/interp_codecs.py pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py Log: Export utf_32 functions in the _codecs module Modified: pypy/branch/fast-forward/pypy/module/_codecs/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_codecs/__init__.py (original) +++ pypy/branch/fast-forward/pypy/module/_codecs/__init__.py Tue Sep 28 00:18:06 2010 @@ -65,6 +65,13 @@ 'utf_16_le_decode' : 'interp_codecs.utf_16_le_decode', 'utf_16_le_encode' : 'interp_codecs.utf_16_le_encode', 'utf_16_ex_decode' : 'interp_codecs.utf_16_ex_decode', + 'utf_32_decode' : 'interp_codecs.utf_32_decode', + 'utf_32_encode' : 'interp_codecs.utf_32_encode', + 'utf_32_be_decode' : 'interp_codecs.utf_32_be_decode', + 'utf_32_be_encode' : 'interp_codecs.utf_32_be_encode', + 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', + 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', + 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', 'charbuffer_encode': 'interp_codecs.buffer_encode', 'readbuffer_encode': 'interp_codecs.buffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', Modified: pypy/branch/fast-forward/pypy/module/_codecs/interp_codecs.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_codecs/interp_codecs.py (original) +++ pypy/branch/fast-forward/pypy/module/_codecs/interp_codecs.py Tue Sep 28 00:18:06 2010 @@ -410,6 +410,9 @@ "utf_16_encode", "utf_16_be_encode", "utf_16_le_encode", + "utf_32_encode", + "utf_32_be_encode", + "utf_32_le_encode", "unicode_escape_encode", "raw_unicode_escape_encode", "unicode_internal_encode", @@ -424,6 +427,9 @@ "utf_16_decode", "utf_16_be_decode", "utf_16_le_decode", + "utf_32_decode", + "utf_32_be_decode", + "utf_32_le_decode", "raw_unicode_escape_decode", ]: make_decoder_wrapper(decoders) @@ -450,6 +456,24 @@ space.wrap(byteorder)]) utf_16_ex_decode.unwrap_spec = [ObjSpace, str, str, int, W_Root] +def utf_32_ex_decode(space, data, errors='strict', byteorder=0, w_final=False): + final = space.is_true(w_final) + state = space.fromcache(CodecState) + if byteorder == 0: + byteorder = 'native' + elif byteorder == -1: + byteorder = 'little' + else: + byteorder = 'big' + consumed = len(data) + if final: + consumed = 0 + res, consumed, byteorder = runicode.str_decode_utf_32_helper( + data, len(data), errors, final, state.decode_error_handler, byteorder) + return space.newtuple([space.wrap(res), space.wrap(consumed), + space.wrap(byteorder)]) +utf_32_ex_decode.unwrap_spec = [ObjSpace, str, str, int, W_Root] + # ____________________________________________________________ # Charmap Modified: pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py (original) +++ pypy/branch/fast-forward/pypy/module/_codecs/test/test_codecs.py Tue Sep 28 00:18:06 2010 @@ -14,6 +14,7 @@ def test_bigU_codecs(self): u = u'\U00010001\U00020002\U00030003\U00040004\U00050005' for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be', + 'utf-32', 'utf-32-le', 'utf-32-be', 'raw_unicode_escape', 'unicode_escape', 'unicode_internal'): assert 
unicode(u.encode(encoding),encoding) == u From afa at codespeak.net Tue Sep 28 00:26:48 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 00:26:48 +0200 (CEST) Subject: [pypy-svn] r77422 - pypy/branch/fast-forward/lib_pypy Message-ID: <20100927222648.9477A282C06@codespeak.net> Author: afa Date: Tue Sep 28 00:26:47 2010 New Revision: 77422 Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py Log: expose hashlib.algorithms Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/hashlib.py (original) +++ pypy/branch/fast-forward/lib_pypy/hashlib.py Tue Sep 28 00:26:47 2010 @@ -108,14 +108,12 @@ new = __hash_new +algorithms = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'] + def __getfunc(name): def new(string=''): return __hash_new(name, string) return new -md5 = __getfunc('md5') -sha1 = __getfunc('sha1') -sha224 = __getfunc('sha224') -sha256 = __getfunc('sha256') -sha384 = __getfunc('sha384') -sha512 = __getfunc('sha512') +for __name in algorithms: + globals()[name] = __getfunc(__name) From afa at codespeak.net Tue Sep 28 00:53:14 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 00:53:14 +0200 (CEST) Subject: [pypy-svn] r77423 - in pypy/branch/fast-forward/pypy/module/itertools: . test Message-ID: <20100927225314.7BC26282C45@codespeak.net> Author: afa Date: Tue Sep 28 00:53:12 2010 New Revision: 77423 Modified: pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py Log: - itertools.chain() is now lazy: its iterables are not checked by the constructor, but when they are consumed. - add itertools.from_iterable() Modified: pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py Tue Sep 28 00:53:12 2010 @@ -372,49 +372,30 @@ class W_Chain(Wrappable): - def __init__(self, space, args_w): + def __init__(self, space, w_iterables): self.space = space - iterators_w = [] - i = 0 - for iterable_w in args_w: - try: - iterator_w = space.iter(iterable_w) - except OperationError, e: - if e.match(self.space, self.space.w_TypeError): - raise OperationError(space.w_TypeError, space.wrap("chain argument #" + str(i + 1) + " must support iteration")) - else: - raise - else: - iterators_w.append(iterator_w) - - i += 1 - - self.iterators_w = iterators_w - self.current_iterator = 0 - self.num_iterators = len(iterators_w) - self.started = False + self.w_iterables = w_iterables + self.w_it = None def iter_w(self): return self.space.wrap(self) + def _advance(self): + self.w_it = self.space.iter(self.space.next(self.w_iterables)) + def next_w(self): - if self.current_iterator >= self.num_iterators: + if not self.w_iterables: + # already stopped raise OperationError(self.space.w_StopIteration, self.space.w_None) - if not self.started: - self.current_iterator = 0 - self.w_it = self.iterators_w[self.current_iterator] - self.started = True + if not self.w_it: + self._advance() while True: try: w_obj = self.space.next(self.w_it) except OperationError, e: if e.match(self.space, self.space.w_StopIteration): - self.current_iterator += 1 - if self.current_iterator >= self.num_iterators: - raise 
OperationError(self.space.w_StopIteration, self.space.w_None) - else: - self.w_it = self.iterators_w[self.current_iterator] + self._advance() # may raise StopIteration itself else: raise else: @@ -422,13 +403,23 @@ return w_obj def W_Chain___new__(space, w_subtype, args_w): - return space.wrap(W_Chain(space, args_w)) + w_args = space.newtuple(args_w) + return space.wrap(W_Chain(space, space.iter(w_args))) + +def chain_from_iterable(space, w_cls, w_arg): + """chain.from_iterable(iterable) --> chain object + + Alternate chain() contructor taking a single iterable argument + that evaluates lazily.""" + return space.wrap(W_Chain(space, space.iter(w_arg))) W_Chain.typedef = TypeDef( 'chain', __new__ = interp2app(W_Chain___new__, unwrap_spec=[ObjSpace, W_Root, 'args_w']), __iter__ = interp2app(W_Chain.iter_w, unwrap_spec=['self']), next = interp2app(W_Chain.next_w, unwrap_spec=['self']), + from_iterable = interp2app(chain_from_iterable, unwrap_spec=[ObjSpace, W_Root, W_Root], + as_classmethod=True), __doc__ = """Make an iterator that returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted. Used for treating consecutive Modified: pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/test/test_itertools.py Tue Sep 28 00:53:12 2010 @@ -271,21 +271,11 @@ assert it.next() == 1 raises(StopIteration, it.next) - def test_chain_wrongargs(self): + def test_chain_fromiterable(self): import itertools - - raises(TypeError, itertools.chain, None) - raises(TypeError, itertools.chain, [], None) - - # The error message should indicate which argument was dodgy - for x in range(10): - args = [()] * x + [None] + [()] * (9 - x) - try: - itertools.chain(*args) - except TypeError, e: - assert str(e).find("#" + str(x + 1) + " ") >= 0 - else: - fail("TypeError expected") + l = [[1, 2, 3], [4], [5, 6]] + it = itertools.chain.from_iterable(l) + assert list(it) == sum(l, []) def test_imap(self): import itertools From afa at codespeak.net Tue Sep 28 00:58:27 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 00:58:27 +0200 (CEST) Subject: [pypy-svn] r77424 - pypy/branch/fast-forward/lib-python Message-ID: <20100927225827.ECC93282C06@codespeak.net> Author: afa Date: Tue Sep 28 00:58:26 2010 New Revision: 77424 Modified: pypy/branch/fast-forward/lib-python/TODO Log: One more medium-to-large task Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Tue Sep 28 00:58:26 2010 @@ -36,6 +36,8 @@ - Finish _multiprocessing +- Update the _ssl module + More difficult issues --------------------- From afa at codespeak.net Tue Sep 28 01:09:13 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 01:09:13 +0200 (CEST) Subject: [pypy-svn] r77425 - pypy/branch/fast-forward/lib-python Message-ID: <20100927230913.2B668282C06@codespeak.net> Author: afa Date: Tue Sep 28 01:09:11 2010 New Revision: 77425 Modified: pypy/branch/fast-forward/lib-python/TODO Log: More todo Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- 
pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Tue Sep 28 01:09:11 2010 @@ -10,6 +10,8 @@ - Octal literals: 0o777 +- float('infinity'), float('nan') + - Seen in test_inspect, this has never worked in pypy:: assert eval('a', None, dict(a=42)) == 42 @@ -28,6 +30,9 @@ - Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: test_pickle() +- "exceptions must be old-style classes or derived from BaseException, not str" + in the 'raise' statement and generator.throw() + Longer tasks ------------ @@ -36,7 +41,7 @@ - Finish _multiprocessing -- Update the _ssl module +- Update the _ssl module (entry point is now _ssl.sslwrap) More difficult issues --------------------- From agaynor at codespeak.net Tue Sep 28 07:00:55 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Tue, 28 Sep 2010 07:00:55 +0200 (CEST) Subject: [pypy-svn] r77426 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100928050055.1E336282C06@codespeak.net> Author: agaynor Date: Tue Sep 28 07:00:53 2010 New Revision: 77426 Modified: pypy/branch/fast-forward/pypy/objspace/std/inttype.py Log: Added real and imag properties to ints. Modified: pypy/branch/fast-forward/pypy/objspace/std/inttype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/inttype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/inttype.py Tue Sep 28 07:00:53 2010 @@ -140,6 +140,12 @@ def descr_get_denominator(space, w_obj): return space.wrap(1) +def descr_get_real(space, w_obj): + return w_obj + +def descr_get_imag(space, w_obj): + return space.wrap(0) + # ____________________________________________________________ int_typedef = StdTypeDef("int", @@ -154,4 +160,6 @@ __new__ = gateway.interp2app(descr__new__), numerator = typedef.GetSetProperty(descr_get_numerator), denominator = typedef.GetSetProperty(descr_get_denominator), - ) + real = typedef.GetSetProperty(descr_get_real), + imag = typedef.GetSetProperty(descr_get_imag), +) From afa at codespeak.net Tue Sep 28 09:46:54 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 09:46:54 +0200 (CEST) Subject: [pypy-svn] r77427 - pypy/branch/fast-forward/pypy/rlib Message-ID: <20100928074654.1D6DB282C06@codespeak.net> Author: afa Date: Tue Sep 28 09:46:52 2010 New Revision: 77427 Modified: pypy/branch/fast-forward/pypy/rlib/runicode.py Log: Fix translation: large unsigned ints are not RPython. 
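
A minimal sketch of the fix that follows, assuming only pypy.rlib.rarithmetic.intmask: the two BOM constants are folded to signed machine-word values once, at module level, so the decoder never sees a bare literal that is too large for a signed word.

    from pypy.rlib.rarithmetic import intmask

    # intmask() truncates to a signed machine word (untranslated it behaves
    # like a C cast to a signed word of the host's size).  On a 32-bit host:
    #   intmask(0x0000FEFF) ==   65279
    #   intmask(0xFFFE0000) == -131072
    # so the constants are bound once here and the decoder below only ever
    # compares plain signed integers, which is valid RPython.
    BOM32_DIRECT  = intmask(0x0000FEFF)
    BOM32_REVERSE = intmask(0xFFFE0000)
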
Modified: pypy/branch/fast-forward/pypy/rlib/runicode.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/runicode.py (original) +++ pypy/branch/fast-forward/pypy/rlib/runicode.py Tue Sep 28 09:46:52 2010 @@ -3,7 +3,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.rstring import StringBuilder, UnicodeBuilder -from pypy.rlib.rarithmetic import r_uint +from pypy.rlib.rarithmetic import r_uint, intmask if rffi.sizeof(lltype.UniChar) == 4: MAXUNICODE = 0x10ffff @@ -438,6 +438,9 @@ errorhandler, "little") return result, length +BOM32_DIRECT = intmask(0x0000FEFF) +BOM32_REVERSE = intmask(0xFFFE0000) + def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, byteorder="native"): @@ -460,18 +463,18 @@ bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) if BYTEORDER == 'little': - if bom == 0x0000FEFF: + if bom == BOM32_DIRECT: pos += 4 bo = -1 - elif bom == 0xFFFE0000: + elif bom == BOM32_REVERSE: pos += 4 bo = 1 else: - if bom == 0x0000FEFF: - pos += 2 + if bom == BOM32_DIRECT: + pos += 4 bo = 1 - elif bom == 0xFFFE0000: - pos += 2 + elif bom == BOM32_REVERSE: + pos += 4 bo = -1 elif byteorder == 'little': bo = -1 From antocuni at codespeak.net Tue Sep 28 11:34:25 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 11:34:25 +0200 (CEST) Subject: [pypy-svn] r77428 - pypy/branch/jitffi/pypy/jit/metainterp Message-ID: <20100928093425.545EB282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 11:34:24 2010 New Revision: 77428 Modified: pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py Log: aargh, my fault (in the resoperation-refactoring branch)! We cannot mutate the arglist just because we are displaying it in the graph viewer Modified: pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py Tue Sep 28 11:34:24 2010 @@ -191,8 +191,7 @@ def getlinks(self): boxes = {} for op in self.all_operations: - args = op.getarglist() - args.append(op.result) + args = op.getarglist() + [op.result] for box in args: if getattr(box, 'is_box', False): boxes[box] = True From antocuni at codespeak.net Tue Sep 28 11:36:11 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 11:36:11 +0200 (CEST) Subject: [pypy-svn] r77429 - pypy/trunk/pypy/jit/metainterp Message-ID: <20100928093611.A6B30282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 11:36:10 2010 New Revision: 77429 Modified: pypy/trunk/pypy/jit/metainterp/graphpage.py Log: aargh, my fault (in the resoperation-refactoring branch)! 
We cannot mutate the arglist just because we are displaying it in the graph viewer Modified: pypy/trunk/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/graphpage.py (original) +++ pypy/trunk/pypy/jit/metainterp/graphpage.py Tue Sep 28 11:36:10 2010 @@ -191,8 +191,7 @@ def getlinks(self): boxes = {} for op in self.all_operations: - args = op.getarglist() - args.append(op.result) + args = op.getarglist() + [op.result] for box in args: if getattr(box, 'is_box', False): boxes[box] = True From cfbolz at codespeak.net Tue Sep 28 11:37:20 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 11:37:20 +0200 (CEST) Subject: [pypy-svn] r77430 - in pypy/extradoc/talk/pepm2011: . figures Message-ID: <20100928093720.A7C48282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 11:37:18 2010 New Revision: 77430 Removed: pypy/extradoc/talk/pepm2011/figures/Makefile Modified: pypy/extradoc/talk/pepm2011/Makefile Log: handle tikz figures in the top-level Makefile Modified: pypy/extradoc/talk/pepm2011/Makefile ============================================================================== --- pypy/extradoc/talk/pepm2011/Makefile (original) +++ pypy/extradoc/talk/pepm2011/Makefile Tue Sep 28 11:37:18 2010 @@ -1,5 +1,5 @@ -escape-tracing.pdf: paper.tex paper.bib +escape-tracing.pdf: paper.tex paper.bib figures/step1.pdf figures/step2.pdf figures/step3.pdf figures/step4.pdf pdflatex paper bibtex paper pdflatex paper @@ -21,3 +21,6 @@ %.pdf: %.eps epstopdf $< + +%.pdf: %.tikz + tikz2pdf -s $< From cfbolz at codespeak.net Tue Sep 28 11:37:38 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 11:37:38 +0200 (CEST) Subject: [pypy-svn] r77431 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928093738.D1779282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 11:37:36 2010 New Revision: 77431 Modified: pypy/extradoc/talk/pepm2011/Makefile Log: preserve comment Modified: pypy/extradoc/talk/pepm2011/Makefile ============================================================================== --- pypy/extradoc/talk/pepm2011/Makefile (original) +++ pypy/extradoc/talk/pepm2011/Makefile Tue Sep 28 11:37:36 2010 @@ -1,3 +1,4 @@ +# for tikz2pdf: http://codespeak.net/svn/user/antocuni/bin/tikz2pdf escape-tracing.pdf: paper.tex paper.bib figures/step1.pdf figures/step2.pdf figures/step3.pdf figures/step4.pdf pdflatex paper From cfbolz at codespeak.net Tue Sep 28 11:51:31 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 11:51:31 +0200 (CEST) Subject: [pypy-svn] r77432 - in pypy/extradoc/talk/pepm2011: . figures Message-ID: <20100928095131.95069282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 11:51:29 2010 New Revision: 77432 Modified: pypy/extradoc/talk/pepm2011/figures/step1.pdf pypy/extradoc/talk/pepm2011/figures/step1.tikz pypy/extradoc/talk/pepm2011/figures/step2.pdf pypy/extradoc/talk/pepm2011/figures/step2.tikz pypy/extradoc/talk/pepm2011/figures/step3.pdf pypy/extradoc/talk/pepm2011/figures/step3.tikz pypy/extradoc/talk/pepm2011/figures/step4.pdf pypy/extradoc/talk/pepm2011/figures/step4.tikz pypy/extradoc/talk/pepm2011/paper.tex Log: convert all traces to use math style. Modified: pypy/extradoc/talk/pepm2011/figures/step1.pdf ============================================================================== Binary files. No diff available. 
Modified: pypy/extradoc/talk/pepm2011/figures/step1.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step1.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step1.tikz Tue Sep 28 11:51:29 2010 @@ -1,7 +1,7 @@ % -*- mode: latex; auto-revert-interval: 0.5 -*- \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto, node distance=4em, - very thin,font={\scriptsize\bf}] + very thin,font={\scriptsize \tt}] \pgfdeclarelayer{background} \pgfdeclarelayer{foreground} @@ -20,39 +20,39 @@ \clip (-3.5, 1.3) rectangle (2.5,-9.3); % plain nodes - \node[enter, name=start] {loop($p_0$, $p_1$)} ; + \node[enter, name=start] {loop[$p_0$, $p_1$]:} ; \node[guard, name=block1, below of=start, node distance=5em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_1$, BoxedInteger) \\ - $i_2$ = getfield\_gc($p_1$, intval) \\ + $i_2$ = getfield($p_1$, intval) \\ guard\_class($p_0$, BoxedInteger) \\ - $i_3$ = getfield\_gc($p_0$, intval) \\ + $i_3$ = getfield($p_0$, intval) \\ $i_4$ = int\_add($i_2$, $i_3$) \\ $i_9$ = int\_add($i_4$, -100) \\ \end{tabular} }; \node[guard, name=block2, below of=block1, node distance=6em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_0$, BoxedInteger) \\ - $i_{12}$ = getfield\_gc($p_0$, intval) \\ + $i_{12}$ = getfield($p_0$, intval) \\ $i_{14}$ = int\_add($i_{12}$, -1) \\ \end{tabular} }; \node[guard, name=block3, below of=block2, node distance=4.5em] { - \begin{tabular}{c} + \begin{tabular}{l} $i_{17}$ = int\_gt($i_{14}$, 0) \\ guard\_true($i_{17}$) \\ \end{tabular} }; \node[newguard, name=block4, below of=block3, node distance=5em] { - \begin{tabular}{c} + \begin{tabular}{l} $p_{15}$ = new(BoxedInteger) \\ - setfield\_gc($p_{15}$, $i_{14}$, intval) \\ + setfield($p_{15}$, $i_{14}$, intval) \\ $p_{10}$ = new(BoxedInteger) \\ - setfield\_gc($p_{10}$, $i_9$, intval) \\ + setfield($p_{10}$, $i_9$, intval) \\ \end{tabular} }; Modified: pypy/extradoc/talk/pepm2011/figures/step2.pdf ============================================================================== Binary files. No diff available. 
Modified: pypy/extradoc/talk/pepm2011/figures/step2.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step2.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step2.tikz Tue Sep 28 11:51:29 2010 @@ -1,7 +1,7 @@ % -*- mode: latex; auto-revert-interval: 0.5 -*- \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto, node distance=4em, - very thin,font={\scriptsize\bf}] + very thin,font={\scriptsize\tt}] \pgfdeclarelayer{background} \pgfdeclarelayer{foreground} @@ -20,38 +20,38 @@ \clip (-3.5, 1.3) rectangle (2.5,-9.3); % plain nodes - \node[enter, name=start] {loop($i_0$, $i_1$)} ; + \node[enter, name=start] {loop[$i_0$, $i_1$]:} ; \node[newguard, name=block4, below of=start, node distance=4.2em] { - \begin{tabular}{c} + \begin{tabular}{l} $p_{0}$ = new(BoxedInteger) \\ - setfield\_gc($p_{0}$, $i_{0}$, intval) \\ + setfield($p_{0}$, $i_{0}$, intval) \\ $p_{1}$ = new(BoxedInteger) \\ - setfield\_gc($p_{1}$, $i_1$, intval) \\ + setfield($p_{1}$, $i_1$, intval) \\ \end{tabular} }; \node[guard, name=block1, below of=block4, node distance=6em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_1$, BoxedInteger) \\ - $i_2$ = getfield\_gc($p_1$, intval) \\ + $i_2$ = getfield($p_1$, intval) \\ guard\_class($p_0$, BoxedInteger) \\ - $i_3$ = getfield\_gc($p_0$, intval) \\ + $i_3$ = getfield($p_0$, intval) \\ $i_4$ = int\_add($i_2$, $i_3$) \\ $i_9$ = int\_add($i_4$, -100) \\ \end{tabular} }; \node[guard, name=block2, below of=block1, node distance=6em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_0$, BoxedInteger) \\ - $i_{12}$ = getfield\_gc($p_0$, intval) \\ + $i_{12}$ = getfield($p_0$, intval) \\ $i_{14}$ = int\_add($i_{12}$, -1) \\ \end{tabular} }; \node[guard, name=block3, below of=block2, node distance=4em] { - \begin{tabular}{c} + \begin{tabular}{l} $i_{17}$ = int\_gt($i_{14}$, 0) \\ guard\_true($i_{17}$) \\ \end{tabular} Modified: pypy/extradoc/talk/pepm2011/figures/step3.pdf ============================================================================== Binary files. No diff available. Modified: pypy/extradoc/talk/pepm2011/figures/step3.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step3.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step3.tikz Tue Sep 28 11:51:29 2010 @@ -1,7 +1,7 @@ % -*- mode: latex; auto-revert-interval: 0.5 -*- \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto, node distance=4em, - very thin,font={\scriptsize\bf}] + very thin,font={\scriptsize\tt}] \pgfdeclarelayer{background} \pgfdeclarelayer{foreground} @@ -23,7 +23,7 @@ \node[enter, name=start2, node distance=5em] {loop($i_0$, $i_1$)} ; \node[guard, name=block4, below of=start2, node distance=5em] { - \begin{tabular}{c} + \begin{tabular}{l} $i_4$ = int\_add($i_1$, $i_0$) \\ $i_9$ = int\_add($i_4$, -100) \\ $i_{14}$ = int\_add($i_0$, -1) \\ Modified: pypy/extradoc/talk/pepm2011/figures/step4.pdf ============================================================================== Binary files. No diff available. 
Modified: pypy/extradoc/talk/pepm2011/figures/step4.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step4.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step4.tikz Tue Sep 28 11:51:29 2010 @@ -1,7 +1,7 @@ % -*- mode: latex; auto-revert-interval: 0.5 -*- \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto, node distance=4em, - very thin,font={\scriptsize\bf}] + very thin,font={\scriptsize\tt}] \pgfdeclarelayer{background} \pgfdeclarelayer{foreground} @@ -22,26 +22,26 @@ % plain nodes \node[enter, name=start] {entry($p_0$, $p_1$)} ; \node[guard, name=block1, below of=start, node distance=5em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_1$, BoxedInteger) \\ - $i_2$ = getfield\_gc($p_1$, intval) \\ + $i_2$ = getfield($p_1$, intval) \\ guard\_class($p_0$, BoxedInteger) \\ - $i_3$ = getfield\_gc($p_0$, intval) \\ + $i_3$ = getfield($p_0$, intval) \\ $i_5$ = int\_add($i_2$, $i_3$) \\ $i_6$ = int\_add($i_5$, -100) \\ \end{tabular} }; \node[guard, name=block2, below of=block1, node distance=6em] { - \begin{tabular}{c} + \begin{tabular}{l} guard\_class($p_0$, BoxedInteger) \\ - $i_{12}$ = getfield\_gc($p_0$, intval) \\ + $i_{12}$ = getfield($p_0$, intval) \\ $i_{7}$ = int\_add($i_{12}$, -1) \\ \end{tabular} }; \node[guard, name=block3, below of=block2, node distance=4.5em] { - \begin{tabular}{c} + \begin{tabular}{l} $i_{8}$ = int\_gt($i_{7}$, 0) \\ guard\_true($i_{8}$) \\ \end{tabular} @@ -49,9 +49,9 @@ \node[guard, name=block3a, below of=block3, node distance=3em] { jump($i_{7}$, $i_6$) \\ } ; - \node[enter, name=start2, below of=block3a, node distance=5em] {loop($i_0$, $i_1$)} ; + \node[enter, name=start2, below of=block3a, node distance=5em] {loop[$i_0$, $i_1$]:} ; \node[guard, name=block4, below of=start2, node distance=5em] { - \begin{tabular}{c} + \begin{tabular}{l} $i_4$ = int\_add($i_1$, $i_0$) \\ $i_9$ = int\_add($i_4$, -100) \\ $i_{14}$ = int\_add($i_0$, -1) \\ Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 11:51:29 2010 @@ -341,62 +341,64 @@ \begin{figure} -\begin{verbatim} - # arguments to the trace: p0, p1 - # inside f: res.add(y) - guard_class(p1, BoxedInteger) - # inside BoxedInteger.add - i2 = getfield_gc(p1, intval) - guard_class(p0, BoxedInteger) - # inside BoxedInteger.add__int - i3 = getfield_gc(p0, intval) - i4 = int_add(i2, i3) - p5 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p5, i4, intval) - # inside f: BoxedInteger(-100) - p6 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p6, -100, intval) - - # inside f: .add(BoxedInteger(-100)) - guard_class(p5, BoxedInteger) - # inside BoxedInteger.add - i7 = getfield_gc(p5, intval) - guard_class(p6, BoxedInteger) - # inside BoxedInteger.add__int - i8 = getfield_gc(p6, intval) - i9 = int_add(i7, i8) - p10 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p10, i9, intval) - - # inside f: BoxedInteger(-1) - p11 = new(BoxedInteger) - # inside BoxedInteger.__init__ - setfield_gc(p11, -1, intval) - - # inside f: y.add(BoxedInteger(-1)) - guard_class(p0, BoxedInteger) - # inside BoxedInteger.add - i12 = getfield_gc(p0, intval) - guard_class(p11, BoxedInteger) - # inside BoxedInteger.add__int - i13 = getfield_gc(p11, intval) - i14 = int_add(i12, i13) - p15 = new(BoxedInteger) - # inside 
BoxedInteger.__init__ - setfield_gc(p15, i14, intval) - - # inside f: y.is_positive() - guard_class(p15, BoxedInteger) - # inside BoxedInteger.is_positive - i16 = getfield_gc(p15, intval) - i17 = int_gt(i16, 0) - # inside f - guard_true(i17) - jump(p15, p10) -\end{verbatim} +\texttt{ +\begin{tabular}{l} +\# arguments to the trace: $p_{0}$, $p_{1}$ \\ +\# inside f: res.add(y) \\ +guard\_class($p_{1}$, BoxedInteger) \\ +~~~~\# inside BoxedInteger.add \\ +~~~~$i_{2}$ = getfield($p_{1}$, intval) \\ +~~~~guard\_class($p_{0}$, BoxedInteger) \\ +~~~~~~~~\# inside BoxedInteger.add\_\_int \\ +~~~~~~~~$i_{3}$ = getfield($p_{0}$, intval) \\ +~~~~~~~~$i_{4}$ = int\_add($i_{2}$, $i_{3}$) \\ +~~~~~~~~$p_{5}$ = new(BoxedInteger) \\ +~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ +~~~~~~~~~~~~setfield($p_{5}$, $i_{4}$, intval) \\ +\# inside f: BoxedInteger(-100) \\ +$p_{6}$ = new(BoxedInteger) \\ +~~~~\# inside BoxedInteger.\_\_init\_\_ \\ +~~~~setfield($p_{6}$, -100, intval) \\ +~\\ +\# inside f: .add(BoxedInteger(-100)) \\ +guard\_class($p_{5}$, BoxedInteger) \\ +~~~~\# inside BoxedInteger.add \\ +~~~~$i_{7}$ = getfield($p_{5}$, intval) \\ +~~~~guard\_class($p_{6}$, BoxedInteger) \\ +~~~~~~~~\# inside BoxedInteger.add\_\_int \\ +~~~~~~~~$i_{8}$ = getfield($p_{6}$, intval) \\ +~~~~~~~~$i_{9}$ = int\_add($i_{7}$, $i_{8}$) \\ +~~~~~~~~$p_{10}$ = new(BoxedInteger) \\ +~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ +~~~~~~~~~~~~setfield($p_{10}$, $i_{9}$, intval) \\ +~\\ +\# inside f: BoxedInteger(-1) \\ +$p_{11}$ = new(BoxedInteger) \\ +~~~~\# inside BoxedInteger.\_\_init\_\_ \\ +~~~~setfield($p_{11}$, -1, intval) \\ +~\\ +\# inside f: y.add(BoxedInteger(-1)) \\ +guard\_class($p_{0}$, BoxedInteger) \\ +~~~~\# inside BoxedInteger.add \\ +~~~~$i_{12}$ = getfield($p_{0}$, intval) \\ +~~~~guard\_class($p_{11}$, BoxedInteger) \\ +~~~~~~~~\# inside BoxedInteger.add\_\_int \\ +~~~~~~~~$i_{13}$ = getfield($p_{11}$, intval) \\ +~~~~~~~~$i_{14}$ = int\_add($i_{12}$, $i_{13}$) \\ +~~~~~~~~$p_{15}$ = new(BoxedInteger) \\ +~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ +~~~~~~~~~~~~setfield($p_{15}$, $i_{14}$, intval) \\ +~\\ +\# inside f: y.is\_positive() \\ +guard\_class($p_{15}$, BoxedInteger) \\ +~~~~\# inside BoxedInteger.is\_positive \\ +~~~~$i_{16}$ = getfield($p_{15}$, intval) \\ +~~~~$i_{17}$ = int\_gt($i_{16}$, 0) \\ +\# inside f \\ +guard\_true($i_{17}$) \\ +jump($p_{15}$, $p_{10}$) \\ +\end{tabular} +} \label{fig:unopt-trace} \caption{Unoptimized Trace for the Simple Object Model} \end{figure} @@ -406,7 +408,7 @@ Figure~\ref{fig:unopt-trace}. The operations in the trace are shown indented to correspond to the stack level of the function that contains the traced operation. The trace also shows the inefficiencies of \texttt{f} clearly, if one -looks at the number of \texttt{new}, \texttt{set/getfield\_gc} and +looks at the number of \texttt{new}, \texttt{set/getfield} and \texttt{guard\_class} operations. Note how the functions that are called by \texttt{f} are automatically inlined @@ -421,7 +423,6 @@ using the interpreter. XXX simplify traces a bit more -get rid of \_gc suffix in set/getfield\_gc In the next section, we will see how this can be improved upon, using escape analysis. XXX @@ -481,7 +482,7 @@ The objects that are allocated in the example trace in Figure~\ref{fig:unopt-trace} fall into categories 1 and 3. 
Objects stored in -\texttt{p5, p6, p11 XXX} are in category 1, objects in \texttt{p10, p15} are in +$p_{5}$, $p_{6}$, $p_{11}$ are in category 1, objects in $p_{10}$, $p_{15}$ are in category 3. The creation of objects in category 1 is removed by the optimization described @@ -522,39 +523,44 @@ In the example from last section, the following operations would produce two virtual objects, and be completely removed from the optimized trace: -\begin{verbatim} -p5 = new(BoxedInteger) -setfield_gc(p5, i4, intval) -p6 = new(BoxedInteger) -setfield_gc(p6, -100, intval) -\end{verbatim} - +\texttt{ +\begin{tabular}{l} +$p_{5}$ = new(BoxedInteger) \\ +setfield($p_{5}$, $i_{4}$, intval) \\ +$p_{6}$ = new(BoxedInteger) \\ +setfield($p_{6}$, -100, intval) \\ +\end{tabular} +} -The virtual object stored in \texttt{p5} would know that it is an \texttt{BoxedInteger}, and that -the \texttt{intval} field contains \texttt{i4}, the one stored in \texttt{p6} would know that +The virtual object stored in $p_{5}$ would know that it is an \texttt{BoxedInteger}, and that +the \texttt{intval} field contains $i_{4}$, the one stored in $p_{6}$ would know that its \texttt{intval} field contains the constant -100. -The following operations, that use \texttt{p5} and \texttt{p6} could then be +The following operations, that use $p_{5}$ and $p_{6}$ could then be optimized using that knowledge: -\begin{verbatim} -guard_class(p5, BoxedInteger) -i7 = getfield_gc(p5, intval) -# inside BoxedInteger.add -guard_class(p6, BoxedInteger) -# inside BoxedInteger.add__int -i8 = getfield_gc(p6, intval) -i9 = int_add(i7, i8) -\end{verbatim} - -The \texttt{guard\_class} operations can be removed, because the classes of \texttt{p5} and -\texttt{p6} are known to be \texttt{BoxedInteger}. The \texttt{getfield\_gc} operations can be removed -and \texttt{i7} and \texttt{i8} are just replaced by \texttt{i4} and -100. Thus the only +\texttt{ +\begin{tabular}{l} +guard\_class($p_{5}$, BoxedInteger) \\ +$i_{7}$ = getfield($p_{5}$, intval) \\ +\# inside BoxedInteger.add \\ +guard\_class($p_{6}$, BoxedInteger) \\ +\# inside BoxedInteger.add\_\_int \\ +$i_{8}$ = getfield($p_{6}$, intval) \\ +$i_{9}$ = int\_add($i_{7}$, $i_{8}$) \\ +\end{tabular} +} + +The \texttt{guard\_class} operations can be removed, because the classes of $p_{5}$ and +$p_{6}$ are known to be \texttt{BoxedInteger}. The \texttt{getfield} operations can be removed +and $i_{7}$ and $i_{8}$ are just replaced by $i_{4}$ and -100. Thus the only remaining operation in the optimized trace would be: -\begin{verbatim} -i9 = int_add(i4, -100) -\end{verbatim} +\texttt{ +\begin{tabular}{l} +$i_{9}$ = int\_add($i_{4}$, -100) \\ +\end{tabular} +} The rest of the trace is optimized similarly. @@ -573,14 +579,13 @@ values that the virtual object has. This means that instead of the jump, the following operations are emitted: -XXX should the variables be written in $math-style$ everywhere? \texttt{ \begin{tabular}{l} $p_{15}$ = new(BoxedInteger) \\ -setfield\_gc($p_{15}$, $i_{14}$, intval) \\ +setfield($p_{15}$, $i_{14}$, intval) \\ $p_{10}$ = new(BoxedInteger) \\ -setfield\_gc($p_{10}$, $i_{9}$, intval) \\ +setfield($p_{10}$, $i_{9}$, intval) \\ jump($p_{15}$, $p_{10}$) \\ \end{tabular} } @@ -589,7 +594,7 @@ trace. It looks like for these operations we actually didn't win much, because the objects are still allocated at the end. 
However, the optimization was still worthwhile even in this case, because some operations that have been performed -on the forced virtual objects have been removed (some \texttt{getfield\_gc} operations +on the forced virtual objects have been removed (some \texttt{getfield} operations and \texttt{guard\_class} operations). \begin{figure} @@ -659,7 +664,7 @@ The final trace was much better than the original one, because many allocations were removed from it. However, it also still contained allocations: -The two new \texttt{BoxedIntegers} stored in \texttt{p15} and \texttt{p10} are passed into +The two new \texttt{BoxedIntegers} stored in $p_{15}$ and $p_{10}$ are passed into the next iteration of the loop. The next iteration will check that they are indeed \texttt{BoxedIntegers}, read their \texttt{intval} fields and then not use them any more. Thus those instances are in category 3. From cfbolz at codespeak.net Tue Sep 28 12:02:35 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 12:02:35 +0200 (CEST) Subject: [pypy-svn] r77433 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928100235.1D571282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 12:02:33 2010 New Revision: 77433 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: cite psyco Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 12:02:33 2010 @@ -144,7 +144,7 @@ and type dispatching. To understand the problem more closely, we analyze the occurring object lifetimes in Section~\ref{sec:lifetimes}. The most important technique to achieve this is a form of escape analysis \cite{XXX} that we call -\emph{virtual objects},\footnote{The terminology comes from \cite{Psyco}} +\emph{virtual objects}\footnote{The terminology comes from \cite{rigo_representation-based_2004}}, which is described in Section~\ref{sec:virtuals}. The goal of virtual objects is to remove allocations of temporary objects that have a predictable lifetime and to optimize type dispatching in the process. From cfbolz at codespeak.net Tue Sep 28 12:06:06 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 12:06:06 +0200 (CEST) Subject: [pypy-svn] r77434 - in pypy/extradoc/talk/pepm2011: . 
figures Message-ID: <20100928100606.B3B98282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 12:06:05 2010 New Revision: 77434 Modified: pypy/extradoc/talk/pepm2011/figures/step1.tikz pypy/extradoc/talk/pepm2011/figures/step2.tikz pypy/extradoc/talk/pepm2011/paper.tex Log: swap last two arguments in setfield Modified: pypy/extradoc/talk/pepm2011/figures/step1.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step1.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step1.tikz Tue Sep 28 12:06:05 2010 @@ -50,9 +50,9 @@ \node[newguard, name=block4, below of=block3, node distance=5em] { \begin{tabular}{l} $p_{15}$ = new(BoxedInteger) \\ - setfield($p_{15}$, $i_{14}$, intval) \\ + setfield($p_{15}$, intval, $i_{14}$) \\ $p_{10}$ = new(BoxedInteger) \\ - setfield($p_{10}$, $i_9$, intval) \\ + setfield($p_{10}$, intval, $i_9$) \\ \end{tabular} }; Modified: pypy/extradoc/talk/pepm2011/figures/step2.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step2.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step2.tikz Tue Sep 28 12:06:05 2010 @@ -25,9 +25,9 @@ \node[newguard, name=block4, below of=start, node distance=4.2em] { \begin{tabular}{l} $p_{0}$ = new(BoxedInteger) \\ - setfield($p_{0}$, $i_{0}$, intval) \\ + setfield($p_{0}$, intval, $i_{0}$) \\ $p_{1}$ = new(BoxedInteger) \\ - setfield($p_{1}$, $i_1$, intval) \\ + setfield($p_{1}$, intval, $i_1$) \\ \end{tabular} }; Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 12:06:05 2010 @@ -354,11 +354,11 @@ ~~~~~~~~$i_{4}$ = int\_add($i_{2}$, $i_{3}$) \\ ~~~~~~~~$p_{5}$ = new(BoxedInteger) \\ ~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ -~~~~~~~~~~~~setfield($p_{5}$, $i_{4}$, intval) \\ +~~~~~~~~~~~~setfield($p_{5}$, intval, $i_{4}$) \\ \# inside f: BoxedInteger(-100) \\ $p_{6}$ = new(BoxedInteger) \\ ~~~~\# inside BoxedInteger.\_\_init\_\_ \\ -~~~~setfield($p_{6}$, -100, intval) \\ +~~~~setfield($p_{6}$, intval, -100) \\ ~\\ \# inside f: .add(BoxedInteger(-100)) \\ guard\_class($p_{5}$, BoxedInteger) \\ @@ -370,12 +370,12 @@ ~~~~~~~~$i_{9}$ = int\_add($i_{7}$, $i_{8}$) \\ ~~~~~~~~$p_{10}$ = new(BoxedInteger) \\ ~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ -~~~~~~~~~~~~setfield($p_{10}$, $i_{9}$, intval) \\ +~~~~~~~~~~~~setfield($p_{10}$, intval, $i_{9}$) \\ ~\\ \# inside f: BoxedInteger(-1) \\ $p_{11}$ = new(BoxedInteger) \\ ~~~~\# inside BoxedInteger.\_\_init\_\_ \\ -~~~~setfield($p_{11}$, -1, intval) \\ +~~~~setfield($p_{11}$, intval, -1) \\ ~\\ \# inside f: y.add(BoxedInteger(-1)) \\ guard\_class($p_{0}$, BoxedInteger) \\ @@ -387,7 +387,7 @@ ~~~~~~~~$i_{14}$ = int\_add($i_{12}$, $i_{13}$) \\ ~~~~~~~~$p_{15}$ = new(BoxedInteger) \\ ~~~~~~~~~~~~\# inside BoxedInteger.\_\_init\_\_ \\ -~~~~~~~~~~~~setfield($p_{15}$, $i_{14}$, intval) \\ +~~~~~~~~~~~~setfield($p_{15}$, intval, $i_{14}$) \\ ~\\ \# inside f: y.is\_positive() \\ guard\_class($p_{15}$, BoxedInteger) \\ @@ -526,9 +526,9 @@ \texttt{ \begin{tabular}{l} $p_{5}$ = new(BoxedInteger) \\ -setfield($p_{5}$, $i_{4}$, intval) \\ +setfield($p_{5}$, intval, $i_{4}$) \\ $p_{6}$ = new(BoxedInteger) \\ -setfield($p_{6}$, -100, intval) \\ +setfield($p_{6}$, intval, -100) \\ \end{tabular} } @@ -583,9 +583,9 @@ \texttt{ \begin{tabular}{l} $p_{15}$ = 
new(BoxedInteger) \\ -setfield($p_{15}$, $i_{14}$, intval) \\ +setfield($p_{15}$, intval, $i_{14}$) \\ $p_{10}$ = new(BoxedInteger) \\ -setfield($p_{10}$, $i_{9}$, intval) \\ +setfield($p_{10}$, intval, $i_{9}$) \\ jump($p_{15}$, $p_{10}$) \\ \end{tabular} } From cfbolz at codespeak.net Tue Sep 28 12:07:52 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 12:07:52 +0200 (CEST) Subject: [pypy-svn] r77435 - pypy/extradoc/talk/pepm2011/figures Message-ID: <20100928100752.825E8282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 12:07:51 2010 New Revision: 77435 Modified: pypy/extradoc/talk/pepm2011/figures/step1.pdf pypy/extradoc/talk/pepm2011/figures/step2.pdf pypy/extradoc/talk/pepm2011/figures/step3.pdf pypy/extradoc/talk/pepm2011/figures/step3.tikz pypy/extradoc/talk/pepm2011/figures/step4.pdf pypy/extradoc/talk/pepm2011/figures/step4.tikz Log: make things consistent Modified: pypy/extradoc/talk/pepm2011/figures/step1.pdf ============================================================================== Binary files. No diff available. Modified: pypy/extradoc/talk/pepm2011/figures/step2.pdf ============================================================================== Binary files. No diff available. Modified: pypy/extradoc/talk/pepm2011/figures/step3.pdf ============================================================================== Binary files. No diff available. Modified: pypy/extradoc/talk/pepm2011/figures/step3.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step3.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step3.tikz Tue Sep 28 12:07:51 2010 @@ -21,7 +21,7 @@ % plain nodes - \node[enter, name=start2, node distance=5em] {loop($i_0$, $i_1$)} ; + \node[enter, name=start2, node distance=5em] {loop[$i_0$, $i_1$]:} ; \node[guard, name=block4, below of=start2, node distance=5em] { \begin{tabular}{l} $i_4$ = int\_add($i_1$, $i_0$) \\ Modified: pypy/extradoc/talk/pepm2011/figures/step4.pdf ============================================================================== Binary files. No diff available. 
Modified: pypy/extradoc/talk/pepm2011/figures/step4.tikz ============================================================================== --- pypy/extradoc/talk/pepm2011/figures/step4.tikz (original) +++ pypy/extradoc/talk/pepm2011/figures/step4.tikz Tue Sep 28 12:07:51 2010 @@ -20,7 +20,7 @@ \clip (-3.5, 1.3) rectangle (2.5,-12); % plain nodes - \node[enter, name=start] {entry($p_0$, $p_1$)} ; + \node[enter, name=start] {entry[$p_0$, $p_1$]:} ; \node[guard, name=block1, below of=start, node distance=5em] { \begin{tabular}{l} guard\_class($p_1$, BoxedInteger) \\ From cfbolz at codespeak.net Tue Sep 28 12:15:03 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 12:15:03 +0200 (CEST) Subject: [pypy-svn] r77436 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928101503.C0277282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 12:15:02 2010 New Revision: 77436 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: fix some XXXs Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 12:15:02 2010 @@ -12,7 +12,7 @@ \usepackage[utf8]{inputenc} \newboolean{showcomments} -\setboolean{showcomments}{false} +\setboolean{showcomments}{true} \ifthenelse{\boolean{showcomments}} {\newcommand{\nb}[2]{ \fbox{\bfseries\sffamily\scriptsize#1} @@ -151,7 +151,7 @@ The basic approach of virtual objects can then be extended to also be used for type-specializing the traces that are produced by the tracing JIT -(Section~\ref{sec:crossloop}). In Section~\ref{sec:XXX} we describe some +(Section~\ref{sec:crossloop}). In Section~\ref{sec:support} we describe some supporting techniques that are not central to the approach, but are needed to improve the results. The introduced techniques are evaluated in Section~\ref{sec:Evaluation} using PyPy's Python interpreter as a case study. @@ -475,7 +475,7 @@ \item Category 4: Objects that live for a while, survive across the jump, and then escape. To these we also count the objects that live across several jumps and then either escape or stop being used.\footnote{In theory, the - approach of Section~\ref{sec:XXX} works also for objects that live for + approach of Section~\ref{sec:crossloop} works also for objects that live for exactly $n>1$ iterations and then don't escape, but we expect this to be a very rare case, so we do not handle it.} \end{itemize} @@ -505,7 +505,7 @@ This process is called \emph{escape analysis}. The escape analysis of our tracing JIT works by using \emph{virtual objects}: The trace is walked from beginning to end and whenever a \texttt{new} operation is seen, the operation is -removed and a virtual object\footnote{XXX what I have in mind when I talk +removed and a virtual object\arigo{XXX what I have in mind when I talk of ``virtual object'' is the run-time behavior -- i.e. a real object that would exist at run-time, except that it has be virtual-ized. Here you seem to mean rather ``virtual object description'' or something.} @@ -570,9 +570,7 @@ is stored in a globally accessible place, the object needs to actually be allocated, as it will live longer than one iteration of the loop. 
-XXX ``the trace above'' is dangerous; should mention all figures by numbers - -This is what happens at the end of the trace above, when the \texttt{jump} operation +This is what happens at the end of the trace in Figure~\ref{fig:unopt-trace}, when the \texttt{jump} operation is hit. The arguments of the jump are at this point virtual objects. Before the jump is emitted, they are \emph{forced}. This means that the optimizers produces code that allocates a new object of the right type and sets its fields to the field @@ -676,7 +674,7 @@ The reason why we cannot optimize the remaining allocations away is because their lifetime crosses the jump. To improve the situation, a little trick is -needed. The trace above represents a loop, i.e. the jump at the end jumps to +needed. The trace in Figure~\ref{fig:step1} represents a loop, i.e. the jump at the end jumps to the beginning. Where in the loop the jump occurs is arbitrary, since the loop can only be left via failing guards anyway. Therefore it does not change the semantics of the loop to put the jump at another point into the trace and we @@ -684,7 +682,7 @@ appear in the current \texttt{jump}. This needs some care, because the arguments to \texttt{jump} are all currently live variables, thus they need to be adapted. -If we do that for our example trace above, the trace looks like this: +If we do that for our example trace, the trace looks like this: \begin{figure} \includegraphics{figures/step2.pdf} \end{figure} @@ -756,6 +754,15 @@ % section Escape Analysis Across Loop Boundaries (end) +\section{Supporting Techniques} +\label{sec:support} + +XXX virtualizables, store sinking + +do we need this? + +% section Supporting Techniques (end) + \section{Evaluation} \label{sec:Evaluation} From cfbolz at codespeak.net Tue Sep 28 12:17:57 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 12:17:57 +0200 (CEST) Subject: [pypy-svn] r77437 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928101757.41E1E282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 12:17:55 2010 New Revision: 77437 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: that figures Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 12:17:55 2010 @@ -682,21 +682,25 @@ appear in the current \texttt{jump}. This needs some care, because the arguments to \texttt{jump} are all currently live variables, thus they need to be adapted. -If we do that for our example trace, the trace looks like this: +If we do that for our example trace, the trace looks like in Figure~\ref{fig:step2}. + \begin{figure} \includegraphics{figures/step2.pdf} +\label{fig:step2} +\caption{Shifting the Jump} \end{figure} -XXX the figure is moved elsewhere by latex Now the lifetime of the remaining allocations no longer crosses the jump, and -we can run our escape analysis a second time, to get the following trace: +we can run our escape analysis a second time, to get the trace in +Figure~\ref{fig:step3}. + \begin{figure} \includegraphics{figures/step3.pdf} +\label{fig:step3} +\caption{Removing Allocations a Second Time} \end{figure} -XXX the figure is moved elsewhere by latex - This result is now really good. The code performs the same operations than the original code, but using direct CPU arithmetic and no boxing, as opposed to the original version which used dynamic dispatching and boxing. 
@@ -732,14 +736,14 @@ needs to be some additional code that enters the loop by taking as input arguments what is available to the interpreter, i.e. two instances. This additional code corresponds to one iteration of the loop, which is thus -peeled off \cite{XXX}: +peeled off \cite{XXX}, see Figure~\ref{fig:step3}. \begin{figure} \includegraphics{figures/step4.pdf} +\label{fig:step3} +\caption{A Way to Enter the Loop From the Interpreter} \end{figure} -XXX the figure is moved elsewhere by latex - XXX optimization particularly effective for chains of operations %___________________________________________________________________________ From cfbolz at codespeak.net Tue Sep 28 13:43:51 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 13:43:51 +0200 (CEST) Subject: [pypy-svn] r77438 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928114351.EC0A5282C16@codespeak.net> Author: cfbolz Date: Tue Sep 28 13:43:50 2010 New Revision: 77438 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: add two paragraphs about virtualizables, I guess they are cuttable Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 13:43:50 2010 @@ -761,7 +761,32 @@ \section{Supporting Techniques} \label{sec:support} -XXX virtualizables, store sinking +\subsection{Virtualizables} +\label{sub:Virtualizables} + +\cfbolz{probably can be cut in case of space problems} + +One problem for the successful application of the allocation removal techniques +described in the previous sections is the presence of frame-introspection +features in many dynamic languages. Languages such as Python and Smalltalk allow +the programmer to get access to the frame objects that the interpreter uses to +store local variables. This is a useful feature, as it makes the implementation of +a debugger possible in Python without needing much support from the VM level. On +the other hand, it severely hinders the effectiveness of allocation removal, +because every time an object is stored into a local variable, it is stored into +the frame-object, which makes it escape. + +This problem is solved by making it possible for the interpreter author to add +some hints into the source code to declare instances of one class as frame +objects. The JIT will then fill these objects only lazily when they are actually +accessed (\eg because a debugger is used). Therefore in the common case, nothing +is stored into the frame objects, making the problem of too much escaping go
From cfbolz at codespeak.net Tue Sep 28 14:39:12 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 14:39:12 +0200 (CEST) Subject: [pypy-svn] r77439 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928123912.9EAA9282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 14:39:11 2010 New Revision: 77439 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: fix an XXX Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 14:39:11 2010 @@ -517,8 +517,8 @@ object, that shape summary is thus updated and the operation can be removed. When the optimizer encounters a \texttt{getfield} from a virtual, the result is read from the virtual object, and the operation is also removed. - -XXX what happens on a guard\_class? +Equivalently, a \texttt{guard\_class} on a virtual object can be removed as +well, because the virtual object has a fixed and known class. In the example from last section, the following operations would produce two virtual objects, and be completely removed from the optimized trace: @@ -565,10 +565,11 @@ The rest of the trace is optimized similarly. So far we have only described what happens when virtual objects are used in -operations that read and write their fields. When the virtual object is used in +operations that read and write their fields and in guards. When the virtual +object is used in any other operation, it cannot stay virtual. For example, when a virtual object is stored in a globally accessible place, the object needs to actually be -allocated, as it will live longer than one iteration of the loop. +allocated, as it might live longer than one iteration of the loop. This is what happens at the end of the trace in Figure~\ref{fig:unopt-trace}, when the \texttt{jump} operation is hit. The arguments of the jump are at this point virtual objects. Before the @@ -577,7 +578,6 @@ values that the virtual object has. This means that instead of the jump, the following operations are emitted: - \texttt{ \begin{tabular}{l} $p_{15}$ = new(BoxedInteger) \\ From cfbolz at codespeak.net Tue Sep 28 14:40:13 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 14:40:13 +0200 (CEST) Subject: [pypy-svn] r77440 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928124013.12974282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 14:40:11 2010 New Revision: 77440 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: list of benchmarks I want to use Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 14:40:11 2010 @@ -795,6 +795,24 @@ \section{Evaluation} \label{sec:Evaluation} +Benchmarks from the Computer Language Benchmark Game are: fannkuch, nbody, +meteor-contest, spectral-norm. + +\textbf{crypto\_pyaes}: AES implementation. +\textbf{django}: The templating engine of the Django web +framework\footnote{\texttt{http://www.djangoproject.com/}}. +\textbf{go}: A Monte-Carlo Go +AI\footnote{\texttt{http://shed-skin.blogspot.com/2009/07/disco-elegant-python-go-player.html}}. 
+\textbf{html5lib}: HTML5 parser +\textbf{pyflate-fast}: BZ2 decoder +\textbf{raytrace-simple}: ray tracer +\textbf{richards}: The Richards benchmark \cite{XXX} +\textbf{spambayes}: A Bayesian spam filter\footnote{\texttt{http://spambayes.sourceforge.net/}}. +\textbf{telco}: A Python version of the Telco decimal +benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, using a pure +Python decimal floating point implementation. +\textbf{twisted\_names}: A DNS server benchmark using the Twisted networking +framework\footnote{\texttt{http://twistedmatrix.com/trac/}}. \section{Related Work} \label{sec:related} From cfbolz at codespeak.net Tue Sep 28 14:49:50 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 14:49:50 +0200 (CEST) Subject: [pypy-svn] r77441 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928124950.D8A22282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 14:49:49 2010 New Revision: 77441 Modified: pypy/extradoc/talk/pepm2011/paper.bib pypy/extradoc/talk/pepm2011/paper.tex Log: add some references Modified: pypy/extradoc/talk/pepm2011/paper.bib ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.bib (original) +++ pypy/extradoc/talk/pepm2011/paper.bib Tue Sep 28 14:49:49 2010 @@ -65,10 +65,30 @@ publisher = {{ACM}}, author = {Davide Ancona and Massimo Ancona and Antonio Cuni and Nicholas D. Matsakis}, year = {2007}, - keywords = {{JVM,} .net, Python}, pages = {53--64} }, + at article{futamura_partial_1999, + title = {Partial Evaluation of Computation Process - An Approach to a {Compiler-Compiler}}, + volume = {12}, + url = {http://citeseer.ist.psu.edu/futamura99partial.html}, + number = {4}, + journal = {{Higher-Order} and Symbolic Computation}, + author = {Yoshihiko Futamura}, + year = {1999}, + pages = {381--391}, +}, + + at book{jones_partial_1993, + title = {Partial evaluation and automatic program generation}, + isbn = {0-13-020249-5}, + url = {http://portal.acm.org/citation.cfm?id=153676}, + abstract = {This book is out of print. For copies, Please refer to the following online page}, + publisher = {{Prentice-Hall,} Inc.}, + author = {Neil D. Jones and Carsten K. Gomard and Peter Sestoft}, + year = {1993} +}, + @inproceedings{armin_rigo_pypys_2006, address = {Portland, Oregon, {USA}}, title = {{PyPy's} approach to virtual machine construction}, @@ -80,7 +100,6 @@ publisher = {{ACM}}, author = {Armin Rigo and Samuele Pedroni}, year = {2006}, - keywords = {metacircularity, Python, retargettable code generation, type inference, {VM}}, pages = {944--953} }, @@ -94,6 +113,21 @@ pages = {20---34} }, + at techreport{miranda_context_1999, + title = {Context Management in {VisualWorks} 5i}, + abstract = {Smalltalk-80 provides a reification of execution state in the form of context objects which represent procedure activation records. Smalltalk-80 also provides full closures with indefinite extent. These features pose interesting implementation challenges because a na?ve implementation entails instantiating context objects on every method activation, but typical Smalltalk-80 programs obey stack discipline for the vast majority of activations. Both software and hardware implementations of Smalltalk-80 have mapped contexts and closure activations to stack frames but not without overhead when compared to traditional stack-based activation and return in ?conventional? languages. 
We present a new design for contexts and closures that significantly reduces the overall overhead of these features and imposes overhead only in code that actually manipulates execution state in the form of contexts.}, + institution = {{ParcPlace} Division, {CINCOM,} Inc.}, + author = {Eliot Miranda}, + year = {1999}, +}, + + at inproceedings{andreas_gal_trace-based_2009, + title = {Trace-based {Just-in-Time} Type Specialization for Dynamic Languages}, + booktitle = {{PLDI}}, + author = {Andreas Gal and Brendan Eich and Mike Shaver and David Anderson and Blake Kaplan and Graydon Hoare and David Mandelin and Boris Zbarsky and Jason Orendorff and Michael Bebenita and Mason Chang and Michael Franz and Edwin Smith and Rick Reitmaier and Mohammad Haghighat}, + year = {2009}, +}, + @inproceedings{bolz_tracing_2009, address = {Genova, Italy}, title = {Tracing the meta-level: {PyPy's} tracing {JIT} compiler}, @@ -109,9 +143,57 @@ pages = {18--25} }, + at techreport{mason_chang_efficient_2007, + title = {Efficient {Just-In-Time} Execution of Dynamically Typed Languages +Via Code Specialization Using Precise Runtime Type Inference}, + abstract = {Dynamically typed languages such as {JavaScript} present a challenge to just-in-time compilers. In contrast to statically typed languages such as {JVML,} in which there are specific opcodes for common operations on primitive types (such as iadd for integer addition), all operations in dynamically typed language such as {JavaScript} are late-bound. Often enough, types cannot be inferred with certainty ahead of execution. As a result, just-in-time compilers for dynamically typed languages have tended to perform worse than their statically-typed counterparts. We present a new approach to compiling dynamically typed languages in which code traces observed during execution are dynamically specialized for each actually observed run-time type. For most benchmark programs, our prototype {JavaScript} virtual machine outperforms every other {JavaScript} platform known to us.}, + number = {{ICS-TR-07-10}}, + institution = {Donald Bren School of Information and Computer Science, University of California, Irvine}, + author = {Mason Chang and Michael Bebenita and Alexander Yermolovich and Andreas Gal and Michael Franz}, + year = {2007}, +}, + + at article{bala_dynamo:_2000, + title = {Dynamo: a transparent dynamic optimization system}, + volume = {35}, + shorttitle = {Dynamo}, + url = {http://citeseer.ist.psu.edu/bala00dynamo.html}, + number = {5}, + journal = {{ACM} {SIGPLAN} Notices}, + author = {Vasanth Bala and Evelyn Duesterwald and Sanjeev Banerjia}, + year = {2000}, + pages = {1--12} +}, + + at techreport{andreas_gal_incremental_2006, + title = {Incremental Dynamic Code Generation with Trace Trees}, + abstract = {The unit of compilation for traditional just-in-time compilers is the method. We have explored trace-based compilation, in which the unit of compilation is a loop, potentially spanning multiple methods and even library code. 
Using a new intermediate representation that is discovered and updated lazily on-demand while the program is being executed, our compiler generates code that is competitive with traditional dynamic compilers, but that uses only a fraction of the compile time and memory footprint.}, + number = {{ICS-TR-06-16}}, + institution = {Donald Bren School of Information and Computer Science, University of California, Irvine}, + author = {Andreas Gal and Michael Franz}, + month = nov, + year = {2006}, + pages = {11} +}, + + at inproceedings{gal_hotpathvm:_2006, + address = {Ottawa, Ontario, Canada}, + title = {{HotpathVM:} an effective {JIT} compiler for resource-constrained devices}, + isbn = {1-59593-332-6}, + shorttitle = {{HotpathVM}}, + url = {http://portal.acm.org/citation.cfm?doid=1134760.1134780}, + doi = {10.1145/1134760.1134780}, + abstract = {We present a just-in-time compiler for a Java {VM} that is small enough to fit on resource-constrained devices, yet is surprisingly effective. Our system dynamically identifies traces of frequently executed bytecode instructions (which may span several basic blocks across several methods) and compiles them via Static Single Assignment {(SSA)} construction. Our novel use of {SSA} form in this context allows to hoist instructions across trace side-exits without necessitating expensive compensation code in off-trace paths. The overall memory consumption (code and data) of our system is only 150 {kBytes,} yet benchmarks show a speedup that in some cases rivals heavy-weight just-in-time compilers.}, + booktitle = {Proceedings of the 2nd international conference on Virtual execution environments}, + publisher = {{ACM}}, + author = {Andreas Gal and Christian W. Probst and Michael Franz}, + year = {2006}, + pages = {144--153} +}, + @inproceedings{rigo_representation-based_2004, address = {Verona, Italy}, - title = {Representation-based just-in-time specialization and the psyco prototype for python}, + title = {Representation-based just-in-time specialization and the Psyco prototype for Python}, isbn = {1-58113-835-0}, url = {http://portal.acm.org/citation.cfm?id=1014010}, doi = {10.1145/1014007.1014010}, @@ -120,7 +202,6 @@ publisher = {{ACM}}, author = {Armin Rigo}, year = {2004}, - keywords = {{JIT,} Python}, pages = {15--26} }, @@ -137,4 +218,4 @@ author = {Carl Friedrich Bolz and Adrian Kuhn and Adrian Lienhard and Nicholas Matsakis and Oscar Nierstrasz and Lukas Renggli and Armin Rigo and Toon Verwaest}, year = {2008}, pages = {123--139} -} \ No newline at end of file +} Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 14:49:49 2010 @@ -198,13 +198,14 @@ \subsection{Tracing JIT Compilers} \label{sub:JIT_background} -Tracing JITs are a recently popular approach to write just-in-time -compilers for dynamic languages \cite{XXX}. Their origins lie in the Dynamo -project, which used a tracing approach to optimize machine code using execution -traces \cite{XXX}. Tracing JITs have then be adapted to be used for a very light-weight -Java VM \cite{XXX} and afterwards used in several implementations of dynamic -languages, such as JavaScript \cite{XXX}, Lua \cite{XXX} and now Python via -PyPy. +Tracing JITs are a recently popular approach to write just-in-time compilers for +dynamic languages. 
Their origins lie in the Dynamo project, which used a tracing +approach to optimize machine code using execution traces +\cite{bala_dynamo:_2000}. Tracing JITs have then be adapted to be used for a +very light-weight Java VM \cite{gal_hotpathvm:_2006} and afterwards used in +several implementations of dynamic languages, such as JavaScript +\cite{andreas_gal_trace-based_2009}, Lua \cite{XXX} and now Python (and other +languages) via PyPy. The core idea of tracing JITs is to focus the optimization effort of the JIT compiler on the hot paths of the core loops of the program and to just use an From cfbolz at codespeak.net Tue Sep 28 14:53:15 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 14:53:15 +0200 (CEST) Subject: [pypy-svn] r77442 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928125315.1C87C282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 14:53:13 2010 New Revision: 77442 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: minimally fix an XXX Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 14:53:13 2010 @@ -247,8 +247,6 @@ getting from the interpreter to traces -XXX object model and its reflection in traces (e.g. guard\_class before each method call) - \subsection{Running Example} For the purpose of this paper, we are going to use a very simple object @@ -409,8 +407,9 @@ Figure~\ref{fig:unopt-trace}. The operations in the trace are shown indented to correspond to the stack level of the function that contains the traced operation. The trace also shows the inefficiencies of \texttt{f} clearly, if one -looks at the number of \texttt{new}, \texttt{set/getfield} and -\texttt{guard\_class} operations. +looks at the number of \texttt{new} (corresponding to object creation), +\texttt{set/getfield} (corresponding to attribute reads/writes) and +\texttt{guard\_class} operations (corresponding to method calls). Note how the functions that are called by \texttt{f} are automatically inlined into the trace. The method calls are always preceded by a \texttt{guard\_class} From antocuni at codespeak.net Tue Sep 28 14:53:59 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 14:53:59 +0200 (CEST) Subject: [pypy-svn] r77443 - pypy/trunk/pypy/tool Message-ID: <20100928125359.515E6282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 14:53:57 2010 New Revision: 77443 Modified: pypy/trunk/pypy/tool/progressbar.py Log: use a simple ascii dot instead of an unicode character encoded as ut8 and stored as a plain string. 
This fixes the progress bar in all the terminals without unicode support, such as mine :-) Modified: pypy/trunk/pypy/tool/progressbar.py ============================================================================== --- pypy/trunk/pypy/tool/progressbar.py (original) +++ pypy/trunk/pypy/tool/progressbar.py Tue Sep 28 14:53:57 2010 @@ -17,11 +17,11 @@ ) PADDING = 7 - def __init__(self, color=None, width=None, block='?', empty=' '): + def __init__(self, color=None, width=None, block='.', empty=' '): """ color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK) width -- bar width (optinal) - block -- progress display character (default '?') + block -- progress display character (default '.') empty -- bar display character (default ' ') """ if color: From antocuni at codespeak.net Tue Sep 28 14:55:14 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 14:55:14 +0200 (CEST) Subject: [pypy-svn] r77444 - pypy/trunk/pypy/jit/tool Message-ID: <20100928125514.B0420282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 14:55:13 2010 New Revision: 77444 Modified: pypy/trunk/pypy/jit/tool/loopviewer.py Log: add the possibility to print a summary of the operations contained in the loop Modified: pypy/trunk/pypy/jit/tool/loopviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/loopviewer.py (original) +++ pypy/trunk/pypy/jit/tool/loopviewer.py Tue Sep 28 14:55:13 2010 @@ -1,30 +1,48 @@ #!/usr/bin/env python -""" Usage: loopviewer.py [loopnum] loopfile +""" +Parse and display the traces produced by pypy-c-jit when PYPYLOG is set. """ import autopath import py import sys +import optparse +from pprint import pprint from pypy.tool import logparser from pypy.jit.metainterp.test.oparser import parse from pypy.jit.metainterp.history import ConstInt from pypy.rpython.lltypesystem import llmemory, lltype -def main(loopnum, loopfile): +def main(loopfile, options): + print 'Loading loop:' log = logparser.parse_log_file(loopfile) + print loops = logparser.extract_category(log, "jit-log-opt-") - inp = loops[loopnum] + inp = loops[options.loopnum] loop = parse(inp, no_namespace=True) - loop.show() + if not options.quiet: + loop.show() + if options.summary: + print 'Summary:' + print_summary(loop.summary()) + +def print_summary(summary): + keys = sorted(summary) + for key in keys: + print '%4d' % summary[key], key if __name__ == '__main__': - if len(sys.argv) == 2: - loopnum = -1 - loopfile = sys.argv[1] - elif len(sys.argv) == 3: - loopnum = int(sys.argv[1]) - loopfile = sys.argv[2] - else: - print __doc__ - sys.exit(1) - main(loopnum, loopfile) + parser = optparse.OptionParser(usage="%prog loopfile [options]") + parser.add_option('-n', '--loopnum', dest='loopnum', default=-1, metavar='N', type=int, + help='show the loop number N [default: last]') + parser.add_option('-s', '--summary', dest='summary', action='store_true', default=False, + help='print a summary of the operations in the loop') + parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False, + help='do not show the graphical representation of the loop') + + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(2) + + main(args[0], options) From cfbolz at codespeak.net Tue Sep 28 14:56:07 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 14:56:07 +0200 (CEST) Subject: [pypy-svn] r77445 - pypy/extradoc/talk/pepm2011 Message-ID: 
<20100928125607.42BCD282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 14:56:05 2010 New Revision: 77445 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: minimally describe bridges Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 14:56:05 2010 @@ -241,11 +241,10 @@ on. These guards are the only mechanism to stop the execution of a trace, the loop end condition also takes the form of a guard. -bridges? - -arguments to traces - -getting from the interpreter to traces +If one specific guard fails often enough, the tracing JIT will generate a new +trace that starts exactly at the position of the failing guard. The existing +assembler is patched to jump to the new trace when the guard fails +\cite{andreas_gal_incremental_2006}. \subsection{Running Example} From antocuni at codespeak.net Tue Sep 28 15:03:58 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 15:03:58 +0200 (CEST) Subject: [pypy-svn] r77446 - pypy/trunk/pypy/jit/tool Message-ID: <20100928130358.C9B02282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 15:03:57 2010 New Revision: 77446 Modified: pypy/trunk/pypy/jit/tool/loopviewer.py Log: add the possibility to consider all loops in a file, and to print an accumulated summary Modified: pypy/trunk/pypy/jit/tool/loopviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/loopviewer.py (original) +++ pypy/trunk/pypy/jit/tool/loopviewer.py Tue Sep 28 15:03:57 2010 @@ -14,17 +14,24 @@ from pypy.rpython.lltypesystem import llmemory, lltype def main(loopfile, options): - print 'Loading loop:' + print 'Loading file:' log = logparser.parse_log_file(loopfile) print loops = logparser.extract_category(log, "jit-log-opt-") - inp = loops[options.loopnum] - loop = parse(inp, no_namespace=True) + if options.loopnum is None: + input_loops = loops + else: + input_loops = [loops[options.loopnum]] + loops = [parse(inp, no_namespace=True) for inp in input_loops] if not options.quiet: - loop.show() + for loop in loops: + loop.show() if options.summary: + summary = {} + for loop in loops: + summary = loop.summary(summary) print 'Summary:' - print_summary(loop.summary()) + print_summary(summary) def print_summary(summary): keys = sorted(summary) @@ -35,8 +42,10 @@ parser = optparse.OptionParser(usage="%prog loopfile [options]") parser.add_option('-n', '--loopnum', dest='loopnum', default=-1, metavar='N', type=int, help='show the loop number N [default: last]') + parser.add_option('-a', '--all', dest='loopnum', action='store_const', const=None, + help='show all loops in the file') parser.add_option('-s', '--summary', dest='summary', action='store_true', default=False, - help='print a summary of the operations in the loop') + help='print a summary of the operations in the loop(s)') parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False, help='do not show the graphical representation of the loop') From cfbolz at codespeak.net Tue Sep 28 15:37:57 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 15:37:57 +0200 (CEST) Subject: [pypy-svn] r77447 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928133757.E8654282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 15:37:56 2010 New Revision: 77447 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: email 
addresses, no clue how to fit affiliations Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 15:37:56 2010 @@ -56,7 +56,8 @@ \authorinfo{Carl Friedrich Bolz \and Antonio Cuni \and Maciej Fija?kowski \and Samuele Pedroni \and Armin Rigo} {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany XXX} - {cfbolz at gmx.de XXX} + {cfbolz at gmx.de, antocuni at gmail.com, fijal at merlinux.eu, + samuele.pedroni at gmail.com, arigo at tunes.org} %\numberofauthors{3} %\author{ @@ -108,7 +109,7 @@ severe in statically typed languages. Boxing of primitive types means that dynamic languages need to be able to handle -all objects, even integers, floats, bools etc. in the same way as user-defined +all objects, even integers, floats, booleans etc. in the same way as user-defined instances. Thus those primitive types are usually \emph{boxed}, i.e. a small heap-structure is allocated for them, that contains the actual value. Boxing primitive types can be very costly, because a lot of common operations, @@ -813,6 +814,8 @@ \textbf{twisted\_names}: A DNS server benchmark using the Twisted networking framework\footnote{\texttt{http://twistedmatrix.com/trac/}}. + + \section{Related Work} \label{sec:related} From cfbolz at codespeak.net Tue Sep 28 15:55:26 2010 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Tue, 28 Sep 2010 15:55:26 +0200 (CEST) Subject: [pypy-svn] r77448 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928135526.834DF282BDD@codespeak.net> Author: cfbolz Date: Tue Sep 28 15:55:25 2010 New Revision: 77448 Added: pypy/extradoc/talk/pepm2011/escape-tracing.pdf (contents, props changed) Log: add pdf Added: pypy/extradoc/talk/pepm2011/escape-tracing.pdf ============================================================================== Binary file. No diff available. From arigo at codespeak.net Tue Sep 28 16:14:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 28 Sep 2010 16:14:24 +0200 (CEST) Subject: [pypy-svn] r77449 - in pypy/trunk/pypy/rpython: lltypesystem memory/gc Message-ID: <20100928141424.18274282BDD@codespeak.net> Author: arigo Date: Tue Sep 28 16:14:22 2010 New Revision: 77449 Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Log: Tweaks and comments. 
Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Tue Sep 28 16:14:22 2010 @@ -479,10 +479,11 @@ def llimpl_arena_malloc(nbytes, zero): addr = llimpl_malloc(nbytes) - if zero and bool(addr): - clear_large_memory_chunk(addr, nbytes) + if bool(addr): + llimpl_arena_reset(addr, nbytes, zero) return addr -register_external(arena_malloc, [int, bool], llmemory.Address, +llimpl_arena_malloc._always_inline_ = True +register_external(arena_malloc, [int, int], llmemory.Address, 'll_arena.arena_malloc', llimpl=llimpl_arena_malloc, llfakeimpl=arena_malloc, @@ -499,6 +500,7 @@ clear_large_memory_chunk(arena_addr, size) else: llmemory.raw_memclear(arena_addr, size) +llimpl_arena_reset._always_inline_ = True register_external(arena_reset, [llmemory.Address, int, int], None, 'll_arena.arena_reset', llimpl=llimpl_arena_reset, Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Tue Sep 28 16:14:22 2010 @@ -101,9 +101,10 @@ # fall-back number. "nursery_size": 896*1024, - # The system page size. Like obmalloc.c, we assume that it is 4K, - # which is OK for most systems. - "page_size": 4096, + # The system page size. Like obmalloc.c, we assume that it is 4K + # for 32-bit systems; unlike obmalloc.c, we assume that it is 8K + # for 64-bit systems, for consistent results. + "page_size": 1024*WORD, # The size of an arena. Arenas are groups of pages allocated # together. @@ -279,7 +280,7 @@ # in malloc_fixedsize_clear(). The few extra pages are never used # anyway so it doesn't even count. extra = self.nonlarge_gcptrs_max + 1 - self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) + self.nursery = llarena.arena_malloc(self.nursery_size + extra, 2) if not self.nursery: raise MemoryError("cannot allocate nursery") # the current position in the nursery: @@ -523,15 +524,11 @@ # Allocate the object using arena_malloc(), which we assume here # is just the same as raw_malloc(), but allows the extra # flexibility of saying that we have extra words in the header. - arena = llarena.arena_malloc(allocsize, False) + # The memory returned is cleared by a raw_memclear(). + arena = llarena.arena_malloc(allocsize, 2) if not arena: raise MemoryError("cannot allocate large object") # - # Clear it using method 2 of llarena.arena_reset(), which is the - # same as just a raw_memclear(). This also clears the card mark - # bits, if any. - llarena.arena_reset(arena, allocsize, 2) - # # Reserve the card mark bits as a list of single bytes # (the loop is empty in C). i = 0 Modified: pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Tue Sep 28 16:14:22 2010 @@ -39,6 +39,9 @@ # -- The chained list of free blocks. If there are none, points to the # first uninitialized block. ('freeblock', llmemory.Address), + # -- The structure above is 4 words, which is a good value: + # '(1024-4) % N' is zero or very small for various small N's, + # i.e. there is not much wasted space. 
) PAGE_PTR.TO.become(PAGE_HEADER) PAGE_NULL = lltype.nullptr(PAGE_HEADER) From arigo at codespeak.net Tue Sep 28 16:17:33 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 28 Sep 2010 16:17:33 +0200 (CEST) Subject: [pypy-svn] r77450 - pypy/trunk/pypy/rpython/memory/gc Message-ID: <20100928141733.E4862282BDD@codespeak.net> Author: arigo Date: Tue Sep 28 16:17:32 2010 New Revision: 77450 Modified: pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Log: More tweaks. Modified: pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Tue Sep 28 16:17:32 2010 @@ -4,6 +4,7 @@ from pypy.rlib.debug import ll_assert WORD = LONG_BIT // 8 +WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] NULL = llmemory.NULL @@ -90,7 +91,7 @@ self.total_memory_used += nsize # # Get the page to use from the size - size_class = nsize / WORD + size_class = nsize >> WORD_POWER_2 page = self.page_for_size[size_class] if page == PAGE_NULL: page = self.allocate_new_page(size_class) @@ -193,7 +194,7 @@ self.total_memory_used = r_uint(0) # # For each size class: - size_class = self.small_request_threshold / WORD + size_class = self.small_request_threshold >> WORD_POWER_2 while size_class >= 1: # # Walk the pages in 'page_for_size[size_class]' and From antocuni at codespeak.net Tue Sep 28 16:25:06 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 16:25:06 +0200 (CEST) Subject: [pypy-svn] r77451 - pypy/extradoc/talk/pepm2011 Message-ID: <20100928142506.3DFA1282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 16:25:04 2010 New Revision: 77451 Modified: pypy/extradoc/talk/pepm2011/paper.tex Log: fix my email address Modified: pypy/extradoc/talk/pepm2011/paper.tex ============================================================================== --- pypy/extradoc/talk/pepm2011/paper.tex (original) +++ pypy/extradoc/talk/pepm2011/paper.tex Tue Sep 28 16:25:04 2010 @@ -56,7 +56,7 @@ \authorinfo{Carl Friedrich Bolz \and Antonio Cuni \and Maciej Fija?kowski \and Samuele Pedroni \and Armin Rigo} {Heinrich-Heine-Universit?t D?sseldorf, STUPS Group, Germany XXX} - {cfbolz at gmx.de, antocuni at gmail.com, fijal at merlinux.eu, + {cfbolz at gmx.de, anto.cuni at gmail.com, fijal at merlinux.eu, samuele.pedroni at gmail.com, arigo at tunes.org} %\numberofauthors{3} From fijal at codespeak.net Tue Sep 28 16:25:54 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Tue, 28 Sep 2010 16:25:54 +0200 (CEST) Subject: [pypy-svn] r77452 - in pypy/trunk: . 
pypy/jit/backend/llgraph pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/codewriter pypy/jit/codewriter/test pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/objspace/std pypy/rpython pypy/rpython/lltypesystem Message-ID: <20100928142554.01FDE282BDD@codespeak.net> Author: fijal Date: Tue Sep 28 16:25:50 2010 New Revision: 77452 Added: pypy/trunk/pypy/jit/backend/x86/test/test_string.py - copied unchanged from r77432, pypy/branch/jit-str/pypy/jit/backend/x86/test/test_string.py pypy/trunk/pypy/jit/metainterp/optimizeopt/string.py - copied, changed from r77432, pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py Modified: pypy/trunk/ (props changed) pypy/trunk/pypy/jit/backend/llgraph/llimpl.py pypy/trunk/pypy/jit/backend/test/runner_test.py pypy/trunk/pypy/jit/backend/x86/assembler.py pypy/trunk/pypy/jit/backend/x86/codebuf.py pypy/trunk/pypy/jit/backend/x86/regalloc.py pypy/trunk/pypy/jit/backend/x86/rx86.py pypy/trunk/pypy/jit/codewriter/assembler.py pypy/trunk/pypy/jit/codewriter/call.py pypy/trunk/pypy/jit/codewriter/codewriter.py pypy/trunk/pypy/jit/codewriter/effectinfo.py pypy/trunk/pypy/jit/codewriter/jtransform.py pypy/trunk/pypy/jit/codewriter/support.py pypy/trunk/pypy/jit/codewriter/test/test_jtransform.py pypy/trunk/pypy/jit/codewriter/test/test_list.py pypy/trunk/pypy/jit/codewriter/test/test_support.py pypy/trunk/pypy/jit/metainterp/blackhole.py pypy/trunk/pypy/jit/metainterp/executor.py pypy/trunk/pypy/jit/metainterp/history.py pypy/trunk/pypy/jit/metainterp/optimizefindnode.py pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed) pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/trunk/pypy/jit/metainterp/optimizeutil.py pypy/trunk/pypy/jit/metainterp/pyjitpl.py pypy/trunk/pypy/jit/metainterp/resoperation.py pypy/trunk/pypy/jit/metainterp/resume.py pypy/trunk/pypy/jit/metainterp/simple_optimize.py pypy/trunk/pypy/jit/metainterp/test/oparser.py pypy/trunk/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py pypy/trunk/pypy/jit/metainterp/test/test_resume.py pypy/trunk/pypy/jit/metainterp/test/test_string.py pypy/trunk/pypy/jit/metainterp/test/test_ztranslation.py pypy/trunk/pypy/jit/metainterp/warmstate.py pypy/trunk/pypy/objspace/std/stringtype.py pypy/trunk/pypy/rpython/annlowlevel.py pypy/trunk/pypy/rpython/lltypesystem/rlist.py pypy/trunk/pypy/rpython/lltypesystem/rstr.py Log: (arigo, fijal merging) Merge jit-str branch. 
This branch allows JIT to do something saner about string operations (for example virtualize a slice) Modified: pypy/trunk/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/trunk/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/trunk/pypy/jit/backend/llgraph/llimpl.py Tue Sep 28 16:25:50 2010 @@ -1382,6 +1382,20 @@ uni = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) uni.chars[index] = unichr(newvalue) +def do_copystrcontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) + assert 0 <= srcstart <= srcstart + length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) + assert 0 <= srcstart <= srcstart + length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ---------- call ---------- _call_args_i = [] Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/trunk/pypy/jit/backend/test/runner_test.py (original) +++ pypy/trunk/pypy/jit/backend/test/runner_test.py Tue Sep 28 16:25:50 2010 @@ -816,6 +816,23 @@ r = self.execute_operation(rop.STRGETITEM, [s_box, BoxInt(4)], 'int') assert r.value == 153 + def test_copystrcontent(self): + s_box = self.alloc_string("abcdef") + for s_box in [s_box, s_box.constbox()]: + for srcstart_box in [BoxInt(2), ConstInt(2)]: + for dststart_box in [BoxInt(3), ConstInt(3)]: + for length_box in [BoxInt(4), ConstInt(4)]: + for r_box_is_const in [False, True]: + r_box = self.alloc_string("!???????!") + if r_box_is_const: + r_box = r_box.constbox() + self.execute_operation(rop.COPYSTRCONTENT, + [s_box, r_box, + srcstart_box, + dststart_box, + length_box], 'void') + assert self.look_string(r_box) == "!??cdef?!" 
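
The new COPYSTRCONTENT operation introduced by this merge copies a range of characters between two strings; do_copystrcontent above spells out its semantics for the llgraph backend, and the x86 backend further down implements it with a memcpy. As a plain-Python paraphrase of that semantics, using a list of characters in place of the mutable rstr.STR chars array and reusing the values from test_copystrcontent (the lower-case helper name is made up here):

def copystrcontent(src, dst, srcstart, dststart, length):
    # same bounds checks as do_copystrcontent in the llgraph backend
    assert 0 <= srcstart <= srcstart + length <= len(src)
    assert 0 <= dststart <= dststart + length <= len(dst)
    for i in range(length):
        dst[dststart + i] = src[srcstart + i]

dst = list("!???????!")
copystrcontent("abcdef", dst, 2, 3, 4)
assert "".join(dst) == "!??cdef?!"
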
+ def test_do_unicode_basic(self): u = self.cpu.bh_newunicode(5) self.cpu.bh_unicodesetitem(u, 4, 123) @@ -1199,6 +1216,10 @@ s_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, s)) return s_box + def look_string(self, string_box): + s = string_box.getref(lltype.Ptr(rstr.STR)) + return ''.join(s.chars) + def alloc_unicode(self, unicode): u = rstr.mallocunicode(len(unicode)) for i in range(len(unicode)): Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/assembler.py (original) +++ pypy/trunk/pypy/jit/backend/x86/assembler.py Tue Sep 28 16:25:50 2010 @@ -181,6 +181,7 @@ self.malloc_fixedsize_slowpath1 = 0 self.malloc_fixedsize_slowpath2 = 0 self.pending_guard_tokens = None + self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') @@ -212,6 +213,7 @@ ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, ll_new_unicode) + self.memcpy_addr = self.cpu.cast_ptr_to_int(codebuf.memcpy_fn) self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -709,8 +711,8 @@ self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, resloc, current_depths) - def load_effective_addr(self, sizereg, baseofs, scale, result): - self.mc.LEA(result, addr_add(imm(0), sizereg, baseofs, scale)) + def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm(0)): + self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) def _unaryop(asmop): def genop_unary(self, op, arglocs, resloc): Modified: pypy/trunk/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/trunk/pypy/jit/backend/x86/codebuf.py Tue Sep 28 16:25:50 2010 @@ -1,6 +1,6 @@ import os, sys -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder from pypy.jit.backend.x86.regloc import LocationCodeBuilder @@ -164,6 +164,12 @@ # ____________________________________________________________ +memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True, _nowrapper=True) + +# ____________________________________________________________ + if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Tue Sep 28 16:25:50 2010 @@ -954,6 +954,41 @@ consider_unicodegetitem = consider_strgetitem + def consider_copystrcontent(self, op): + # compute the source address + base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args) + self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.args[2]) + srcaddr_box = TempBox() + srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc) + # compute the destination address + base_loc = 
self.rm.make_sure_var_in_reg(op.args[1], op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.args[3], op.args) + self.rm.possibly_free_var(op.args[1]) + self.rm.possibly_free_var(op.args[3]) + dstaddr_box = TempBox() + dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) + # call memcpy() + length_loc = self.loc(op.args[4]) + self.rm.before_call() + self.xrm.before_call() + self.assembler._emit_call(imm(self.assembler.memcpy_addr), + [dstaddr_loc, srcaddr_loc, length_loc]) + self.rm.possibly_free_var(op.args[4]) + self.rm.possibly_free_var(dstaddr_box) + self.rm.possibly_free_var(srcaddr_box) + + def _gen_address_inside_string(self, baseloc, ofsloc, resloc): + cpu = self.assembler.cpu + ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR, + self.translate_support_code) + assert itemsize == 1 + self.assembler.load_effective_addr(ofsloc, ofs_items, 0, + resloc, baseloc) + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None Modified: pypy/trunk/pypy/jit/backend/x86/rx86.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/rx86.py (original) +++ pypy/trunk/pypy/jit/backend/x86/rx86.py Tue Sep 28 16:25:50 2010 @@ -506,6 +506,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) Modified: pypy/trunk/pypy/jit/codewriter/assembler.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/assembler.py (original) +++ pypy/trunk/pypy/jit/codewriter/assembler.py Tue Sep 28 16:25:50 2010 @@ -232,3 +232,11 @@ return addr = llmemory.cast_ptr_to_adr(value) self.list_of_addr2name.append((addr, name)) + + def finished(self): + # Helper called at the end of assembling. Registers the extra + # functions shown in _callinfo_for_oopspec. + from pypy.jit.codewriter.effectinfo import _callinfo_for_oopspec + for _, func in _callinfo_for_oopspec.values(): + func = heaptracker.int2adr(func) + self.see_raw_object(func.ptr) Modified: pypy/trunk/pypy/jit/codewriter/call.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/call.py (original) +++ pypy/trunk/pypy/jit/codewriter/call.py Tue Sep 28 16:25:50 2010 @@ -185,7 +185,7 @@ FUNC.RESULT) return (fnaddr, calldescr) - def getcalldescr(self, op): + def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. 
It gets an effectinfo @@ -226,7 +226,8 @@ extraeffect = EffectInfo.EF_CANNOT_RAISE # effectinfo = effectinfo_from_writeanalyze( - self.readwrite_analyzer.analyze(op), self.cpu, extraeffect) + self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, + oopspecindex) # if pure or loopinvariant: assert effectinfo is not None Modified: pypy/trunk/pypy/jit/codewriter/codewriter.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/codewriter.py (original) +++ pypy/trunk/pypy/jit/codewriter/codewriter.py Tue Sep 28 16:25:50 2010 @@ -73,6 +73,7 @@ count += 1 if not count % 500: log.info("Produced %d jitcodes" % count) + self.assembler.finished() heaptracker.finish_registering(self.cpu) log.info("there are %d JitCode instances." % count) Modified: pypy/trunk/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/trunk/pypy/jit/codewriter/effectinfo.py Tue Sep 28 16:25:50 2010 @@ -15,13 +15,32 @@ EF_LOOPINVARIANT = 3 #special: call it only once per loop EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 4 #can raise and force virtualizables + # the 'oopspecindex' field is one of the following values: + OS_NONE = 0 # normal case, no oopspec + OS_ARRAYCOPY = 1 # "list.ll_arraycopy" + OS_STR_CONCAT = 2 # "stroruni.concat" + OS_UNI_CONCAT = 3 # "stroruni.concat" + OS_STR_SLICE = 4 # "stroruni.slice" + OS_UNI_SLICE = 5 # "stroruni.slice" + OS_STR_EQUAL = 6 # "stroruni.equal" + OS_UNI_EQUAL = 7 # "stroruni.equal" + OS_STREQ_SLICE_CHECKNULL = 8 # s2!=NULL and s1[x:x+length]==s2 + OS_STREQ_SLICE_NONNULL = 9 # s1[x:x+length]==s2 (assert s2!=NULL) + OS_STREQ_SLICE_CHAR = 10 # s1[x:x+length]==char + OS_STREQ_NONNULL = 11 # s1 == s2 (assert s1!=NULL,s2!=NULL) + OS_STREQ_NONNULL_CHAR = 12 # s1 == char (assert s1!=NULL) + OS_STREQ_CHECKNULL_CHAR = 13 # s1!=NULL and s1==char + OS_STREQ_LENGTHOK = 14 # s1 == s2 (assert len(s1)==len(s2)) + def __new__(cls, readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, - extraeffect=EF_CAN_RAISE): + extraeffect=EF_CAN_RAISE, + oopspecindex=OS_NONE): key = (frozenset(readonly_descrs_fields), frozenset(write_descrs_fields), frozenset(write_descrs_arrays), - extraeffect) + extraeffect, + oopspecindex) if key in cls._cache: return cls._cache[key] result = object.__new__(cls) @@ -29,6 +48,7 @@ result.write_descrs_fields = write_descrs_fields result.write_descrs_arrays = write_descrs_arrays result.extraeffect = extraeffect + result.oopspecindex = oopspecindex cls._cache[key] = result return result @@ -36,7 +56,8 @@ return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE def effectinfo_from_writeanalyze(effects, cpu, - extraeffect=EffectInfo.EF_CAN_RAISE): + extraeffect=EffectInfo.EF_CAN_RAISE, + oopspecindex=EffectInfo.OS_NONE): from pypy.translator.backendopt.writeanalyze import top_set if effects is top_set: return None @@ -73,7 +94,8 @@ return EffectInfo(readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, - extraeffect) + extraeffect, + oopspecindex) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: @@ -104,3 +126,30 @@ def analyze_simple_operation(self, op): return op.opname in ('jit_force_virtualizable', 'jit_force_virtual') + +# ____________________________________________________________ + +_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)} + +def callinfo_for_oopspec(oopspecindex): + """A function that returns 
the calldescr and the function + address (as an int) of one of the OS_XYZ functions defined above. + Don't use this if there might be several implementations of the same + OS_XYZ specialized by type, e.g. OS_ARRAYCOPY.""" + return _callinfo_for_oopspec[oopspecindex] + + +def _funcptr_for_oopspec_memo(oopspecindex): + from pypy.jit.codewriter import heaptracker + _, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0)) + funcadr = heaptracker.int2adr(func_as_int) + return funcadr.ptr +_funcptr_for_oopspec_memo._annspecialcase_ = 'specialize:memo' + +def funcptr_for_oopspec(oopspecindex): + """A memo function that returns a pointer to the function described + by OS_XYZ (as a real low-level function pointer).""" + funcptr = _funcptr_for_oopspec_memo(oopspecindex) + assert funcptr + return funcptr +funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(0)' Modified: pypy/trunk/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/jtransform.py (original) +++ pypy/trunk/pypy/jit/codewriter/jtransform.py Tue Sep 28 16:25:50 2010 @@ -1,16 +1,18 @@ import py, sys -from pypy.rpython.lltypesystem import lltype, rstr, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass from pypy.rpython import rlist from pypy.jit.metainterp.history import getkind from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.objspace.flow.model import Block, Link, c_last_exception from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo, _callinfo_for_oopspec from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj +from pypy.translator.unsimplify import varoftype def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): @@ -248,11 +250,13 @@ kind = self.callcontrol.guess_call_kind(op) return getattr(self, 'handle_%s_indirect_call' % kind)(op) - def rewrite_call(self, op, namebase, initialargs): + def rewrite_call(self, op, namebase, initialargs, args=None): """Turn 'i0 = direct_call(fn, i1, i2, ref1, ref2)' into 'i0 = xxx_call_ir_i(fn, descr, [i1,i2], [ref1,ref2])'. 
The name is one of '{residual,direct}_call_{r,ir,irf}_{i,r,f,v}'.""" - lst_i, lst_r, lst_f = self.make_three_lists(op.args[1:]) + if args is None: + args = op.args[1:] + lst_i, lst_r, lst_f = self.make_three_lists(args) reskind = getkind(op.result.concretetype)[0] if lst_f or reskind == 'f': kinds = 'irf' elif lst_i: kinds = 'ir' @@ -310,6 +314,8 @@ # dispatch to various implementations depending on the oopspec_name if oopspec_name.startswith('list.') or oopspec_name == 'newlist': prepare = self._handle_list_call + elif oopspec_name.startswith('stroruni.'): + prepare = self._handle_stroruni_call elif oopspec_name.startswith('virtual_ref'): prepare = self._handle_virtual_ref_call else: @@ -982,10 +988,7 @@ return extraop + [op] def do_fixed_list_ll_arraycopy(self, op, args, arraydescr): - calldescr = self.callcontrol.getcalldescr(op) - return SpaceOperation('arraycopy', - [calldescr, op.args[0]] + args + [arraydescr], - op.result) + return self._handle_oopspec_call(op, args, EffectInfo.OS_ARRAYCOPY) # ---------- resizable lists ---------- @@ -1023,6 +1026,92 @@ [args[0], lengthdescr], op.result) # ---------- + # Strings and Unicodes. + + def _handle_oopspec_call(self, op, args, oopspecindex): + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + if isinstance(op.args[0].value, str): + pass # for tests only + else: + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(op.args[0].value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func + op1 = self.rewrite_call(op, 'residual_call', + [op.args[0], calldescr], + args=args) + if self.callcontrol.calldescr_canraise(calldescr): + op1 = [op1, SpaceOperation('-live-', [], None)] + return op1 + + def _register_extra_helper(self, oopspecindex, oopspec_name, + argtypes, resulttype): + # a bit hackish + if oopspecindex in _callinfo_for_oopspec: + return + c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper, + oopspec_name, argtypes, + resulttype) + op = SpaceOperation('pseudo_call', + [c_func] + [varoftype(T) for T in argtypes], + varoftype(resulttype)) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(c_func.value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func + + def _handle_stroruni_call(self, op, oopspec_name, args): + if args[0].concretetype.TO == rstr.STR: + dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, + "stroruni.slice": EffectInfo.OS_STR_SLICE, + "stroruni.equal": EffectInfo.OS_STR_EQUAL, + } + elif args[0].concretetype.TO == rstr.UNICODE: + dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, + "stroruni.slice": EffectInfo.OS_UNI_SLICE, + "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + } + else: + assert 0, "args[0].concretetype must be STR or UNICODE" + # + if oopspec_name == "stroruni.equal": + SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE) + for otherindex, othername, argtypes, resulttype in [ + + (EffectInfo.OS_STREQ_SLICE_CHECKNULL, + "str.eq_slice_checknull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_NONNULL, + "str.eq_slice_nonnull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_CHAR, + "str.eq_slice_char", + [SoU, lltype.Signed, lltype.Signed, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL, + "str.eq_nonnull", + [SoU, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL_CHAR, + "str.eq_nonnull_char", + [SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_CHECKNULL_CHAR, + "str.eq_checknull_char", + 
[SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_LENGTHOK, + "str.eq_lengthok", + [SoU, SoU], + lltype.Signed), + ]: + self._register_extra_helper(otherindex, othername, + argtypes, resulttype) + # + return self._handle_oopspec_call(op, args, dict[oopspec_name]) + + # ---------- # VirtualRefs. def _handle_virtual_ref_call(self, op, oopspec_name, args): Modified: pypy/trunk/pypy/jit/codewriter/support.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/support.py (original) +++ pypy/trunk/pypy/jit/codewriter/support.py Tue Sep 28 16:25:50 2010 @@ -275,10 +275,86 @@ # ---------- strings and unicode ---------- - _ll_5_string_copy_contents = ll_rstr.copy_string_contents - _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode - _ll_5_unicode_copy_contents = ll_rstr.copy_unicode_contents + + def _ll_4_str_eq_slice_checknull(s1, start, length, s2): + """str1[start : start + length] == str2.""" + if not s2: + return 0 + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_nonnull(s1, start, length, s2): + """str1[start : start + length] == str2, assuming str2 != NULL.""" + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_char(s1, start, length, c2): + """str1[start : start + length] == c2.""" + if length != 1: + return 0 + if s1.chars[start] != c2: + return 0 + return 1 + + def _ll_2_str_eq_nonnull(s1, s2): + len1 = len(s1.chars) + len2 = len(s2.chars) + if len1 != len2: + return 0 + j = 0 + chars1 = s1.chars + chars2 = s2.chars + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_2_str_eq_nonnull_char(s1, c2): + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_checknull_char(s1, c2): + if not s1: + return 0 + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_lengthok(s1, s2): + j = 0 + chars1 = s1.chars + chars2 = s2.chars + len1 = len(chars1) + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 1 + return 1 # ---------- malloc with del ---------- Modified: pypy/trunk/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/trunk/pypy/jit/codewriter/test/test_jtransform.py Tue Sep 28 16:25:50 2010 @@ -1,11 +1,16 @@ +import py import random from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.jit.codewriter.jtransform import Transformer from pypy.jit.metainterp.history import getkind -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rlist from pypy.translator.unsimplify import varoftype -from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter import heaptracker, effectinfo +from pypy.jit.codewriter.flatten import ListOfKind + +def const(x): + return Constant(x, lltype.typeOf(x)) class FakeRTyper: class type_system: name = 'lltypesystem' @@ -17,6 +22,8 @@ return ('calldescr', FUNC, ARGS, RESULT) def 
fielddescrof(self, STRUCT, name): return ('fielddescr', STRUCT, name) + def arraydescrof(self, ARRAY): + return FakeDescr(('arraydescr', ARRAY)) def sizeof(self, STRUCT): return FakeDescr(('sizedescr', STRUCT)) @@ -67,6 +74,14 @@ def calldescr_canraise(self, calldescr): return False +class FakeBuiltinCallControl: + def guess_call_kind(self, op): + return 'builtin' + def getcalldescr(self, op, oopspecindex): + return 'calldescr-%d' % oopspecindex + def calldescr_canraise(self, calldescr): + return False + def test_optimize_goto_if_not(): v1 = Variable() @@ -107,7 +122,7 @@ assert block.operations == [] assert block.exitswitch == ('int_gt', v1, v2) assert block.exits == exits - assert exits[1].args == [Constant(True, lltype.Bool)] + assert exits[1].args == [const(True)] def test_optimize_goto_if_not__unknownop(): v3 = Variable(); v3.concretetype = lltype.Bool @@ -159,8 +174,8 @@ 'float_gt': ('float_gt', 'float_lt'), } v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: for name1, name2 in ops.items(): op = SpaceOperation(name1, [v1, v2], v3) op1 = Transformer(FakeCPU()).rewrite_operation(op) @@ -177,8 +192,8 @@ def test_symmetric_int_add_ovf(): v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) op0, op1 = oplist @@ -218,7 +233,7 @@ def get_direct_call_op(argtypes, restype): FUNC = lltype.FuncType(argtypes, restype) fnptr = lltype.functionptr(FUNC, "g") # no graph - c_fnptr = Constant(fnptr, concretetype=lltype.typeOf(fnptr)) + c_fnptr = const(fnptr) vars = [varoftype(TYPE) for TYPE in argtypes] v_result = varoftype(restype) op = SpaceOperation('direct_call', [c_fnptr] + vars, v_result) @@ -465,7 +480,7 @@ v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) v3 = varoftype(lltype.Bool) - c0 = Constant(0, lltype.Signed) + c0 = const(0) # for opname, reducedname in [('int_eq', 'int_is_zero'), ('int_ne', 'int_is_true')]: @@ -488,7 +503,7 @@ v1 = varoftype(rclass.OBJECTPTR) v2 = varoftype(rclass.OBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.OBJECT), rclass.OBJECTPTR) + c0 = const(lltype.nullptr(rclass.OBJECT)) # for opname, reducedname in [('ptr_eq', 'ptr_iszero'), ('ptr_ne', 'ptr_nonzero')]: @@ -511,7 +526,7 @@ v1 = varoftype(rclass.NONGCOBJECTPTR) v2 = varoftype(rclass.NONGCOBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.NONGCOBJECT), rclass.NONGCOBJECTPTR) + c0 = const(lltype.nullptr(rclass.NONGCOBJECT)) # for opname, reducedname in [('ptr_eq', 'int_is_zero'), ('ptr_ne', 'int_is_true')]: @@ -656,3 +671,119 @@ oplist = tr.rewrite_operation(op) assert oplist[0].opname == 'inline_call_ir_i' assert oplist[0].args[0] == 'somejitcode' + +def test_str_newstr(): + c_STR = Constant(rstr.STR, lltype.Void) + c_flavor = Constant({'flavor': 'gc'}, lltype.Void) + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Ptr(rstr.STR)) + op = SpaceOperation('malloc_varsize', [c_STR, c_flavor, v1], v2) + op1 = Transformer().rewrite_operation(op) + assert op1.opname == 'newstr' + assert op1.args == [v1] + assert 
op1.result == v2 + +def test_str_concat(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) + assert op1.result == v3 + +def test_unicode_concat(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.UNICODE) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) + assert op1.result == v3 + # + # check the callinfo_for_oopspec + got = effectinfo.callinfo_for_oopspec(effectinfo.EffectInfo.OS_UNI_CONCAT) + assert got[0] == op1.args[1] # the calldescr + assert heaptracker.int2adr(got[1]) == llmemory.cast_ptr_to_adr(func) + +def test_str_slice(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + INT = lltype.Signed + FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) + v1 = varoftype(PSTR) + v2 = varoftype(INT) + v3 = varoftype(INT) + v4 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_SLICE + assert op1.args[2] == ListOfKind('int', [v2, v3]) + assert op1.args[3] == ListOfKind('ref', [v1]) + assert op1.result == v4 + +def test_unicode_slice(): + # test that the oopspec is present and correctly transformed + PUNICODE = lltype.Ptr(rstr.UNICODE) + INT = lltype.Signed + FUNC = lltype.FuncType([PUNICODE, INT, INT], PUNICODE) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) + v1 = varoftype(PUNICODE) + v2 = varoftype(INT) + v3 = varoftype(INT) + v4 = varoftype(PUNICODE) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_SLICE + assert op1.args[2] == ListOfKind('int', [v2, v3]) + assert op1.args[3] == ListOfKind('ref', [v1]) + assert op1.result == v4 + +def test_list_ll_arraycopy(): + from pypy.rlib.rgc import ll_arraycopy + LIST = lltype.GcArray(lltype.Signed) + PLIST = lltype.Ptr(LIST) + INT = lltype.Signed + FUNC = lltype.FuncType([PLIST]*2+[INT]*3, lltype.Void) 
+ func = lltype.functionptr(FUNC, 'll_arraycopy', _callable=ll_arraycopy) + v1 = varoftype(PLIST) + v2 = varoftype(PLIST) + v3 = varoftype(INT) + v4 = varoftype(INT) + v5 = varoftype(INT) + v6 = varoftype(lltype.Void) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_v' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY + assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) + assert op1.args[3] == ListOfKind('ref', [v1, v2]) Modified: pypy/trunk/pypy/jit/codewriter/test/test_list.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/test/test_list.py (original) +++ pypy/trunk/pypy/jit/codewriter/test/test_list.py Tue Sep 28 16:25:50 2010 @@ -36,10 +36,16 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op): + def __init__(self, op, oopspecindex=0): self.op = op + self.oopspecindex = oopspecindex def __repr__(self): - return '' + if self.oopspecindex == 0: + return '' + else: + return '' % self.oopspecindex + def calldescr_canraise(self, calldescr): + return False def builtin_test(oopspec_name, args, RESTYPE, expected): v_result = varoftype(RESTYPE) @@ -99,7 +105,7 @@ varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ - arraycopy , $'myfunc', %r0, %r1, %i0, %i1, %i2, + residual_call_ir_v $'myfunc', , I[%i0, %i1, %i2], R[%r0, %r1] """) def test_fixed_getitem(): Modified: pypy/trunk/pypy/jit/codewriter/test/test_support.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/test/test_support.py (original) +++ pypy/trunk/pypy/jit/codewriter/test/test_support.py Tue Sep 28 16:25:50 2010 @@ -1,7 +1,8 @@ import py from pypy.rpython.lltypesystem import lltype +from pypy.rpython.annlowlevel import llstr from pypy.objspace.flow.model import Variable, Constant, SpaceOperation -from pypy.jit.codewriter.support import decode_builtin_call +from pypy.jit.codewriter.support import decode_builtin_call, LLtypeHelpers def newconst(x): return Constant(x, lltype.typeOf(x)) @@ -65,3 +66,70 @@ assert opargs == [newconst(myarray), newconst(2), vc, vi] #impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A)) #assert impl(myarray, 2, 'A', 5) == 42 * ord('A') + +def test_streq_slice_checknull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_checknull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + assert func(p1, 2, 1, llstr(None)) == False + +def test_streq_slice_nonnull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_nonnull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + py.test.raises(AttributeError, func, p1, 2, 1, llstr(None)) + +def test_streq_slice_char(): + p1 = llstr("hello world") + func = LLtypeHelpers._ll_4_str_eq_slice_char.im_func + assert func(p1, 6, 3, "w") == False + assert func(p1, 6, 0, "w") == False + assert func(p1, 6, 1, "w") == True + assert func(p1, 6, 1, "x") == False + +def test_streq_nonnull(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_nonnull.im_func + assert func(p1, p1) == True + assert func(p1, p2) == True + 
assert func(p1, llstr("wrl")) == False + assert func(p1, llstr("world")) == False + assert func(p1, llstr("w")) == False + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) + +def test_streq_nonnull_char(): + func = LLtypeHelpers._ll_2_str_eq_nonnull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + py.test.raises(AttributeError, func, llstr(None), "x") + +def test_streq_checknull_char(): + func = LLtypeHelpers._ll_2_str_eq_checknull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + assert func(llstr(None), "x") == False + +def test_streq_lengthok(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_lengthok.im_func + assert func(p1, p1) == True + assert func(p1, p2) == True + assert func(p1, llstr("wrl")) == False + py.test.raises(IndexError, func, p1, llstr("w")) + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) Modified: pypy/trunk/pypy/jit/metainterp/blackhole.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/blackhole.py (original) +++ pypy/trunk/pypy/jit/metainterp/blackhole.py Tue Sep 28 16:25:50 2010 @@ -1024,10 +1024,6 @@ def bhimpl_arraylen_gc(cpu, array, arraydescr): return cpu.bh_arraylen_gc(arraydescr, array) - @arguments("cpu", "d", "i", "r", "r", "i", "i", "i", "d") - def bhimpl_arraycopy(cpu, calldescr, func, x1, x2, x3, x4, x5, arraydescr): - cpu.bh_call_v(func, calldescr, [x3, x4, x5], [x1, x2], None) - @arguments("cpu", "r", "d", "d", "i", returns="i") def bhimpl_getarrayitem_vable_i(cpu, vable, fielddescr, arraydescr, index): array = cpu.bh_getfield_gc_r(vable, fielddescr) Modified: pypy/trunk/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/executor.py (original) +++ pypy/trunk/pypy/jit/metainterp/executor.py Tue Sep 28 16:25:50 2010 @@ -2,7 +2,7 @@ """ import py -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import lltype, llmemory, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask @@ -165,12 +165,6 @@ def do_new_with_vtable(cpu, _, clsbox): return BoxPtr(exec_new_with_vtable(cpu, clsbox)) -def do_arraycopy(cpu, _, calldescr, funcbox, x1box, x2box, - x3box, x4box, x5box, arraydescr): - cpu.bh_call_v(funcbox.getint(), calldescr, - [x3box.getint(), x4box.getint(), x5box.getint()], - [x1box.getref_base(), x2box.getref_base()], None) - def do_int_add_ovf(cpu, metainterp, box1, box2): # the overflow operations can be called without a metainterp, if an # overflow cannot occur @@ -209,6 +203,24 @@ def do_same_as(cpu, _, box): return box.clonebox() +def do_copystrcontent(cpu, _, srcbox, dstbox, + srcstartbox, dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.STR)) + dst = dstbox.getptr(lltype.Ptr(rstr.STR)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(cpu, _, srcbox, dstbox, + srcstartbox, 
dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.UNICODE)) + dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ____________________________________________________________ ##def do_force_token(cpu): Modified: pypy/trunk/pypy/jit/metainterp/history.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/history.py (original) +++ pypy/trunk/pypy/jit/metainterp/history.py Tue Sep 28 16:25:50 2010 @@ -685,6 +685,19 @@ return llmemory.cast_adr_to_int(adr, "emulated") return i +def get_const_ptr_for_string(s): + from pypy.rpython.annlowlevel import llstr + if not we_are_translated(): + try: + return _const_ptr_for_string[s] + except KeyError: + pass + result = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, llstr(s))) + if not we_are_translated(): + _const_ptr_for_string[s] = result + return result +_const_ptr_for_string = {} + # ____________________________________________________________ # The TreeLoop class contains a loop or a generalized loop, i.e. a tree Modified: pypy/trunk/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizefindnode.py Tue Sep 28 16:25:50 2010 @@ -174,7 +174,7 @@ find_nodes_PTR_EQ = find_nodes_no_escape find_nodes_PTR_NE = find_nodes_no_escape - find_nodes_INSTANCEOF = find_nodes_no_escape + ##find_nodes_INSTANCEOF = find_nodes_no_escape find_nodes_GUARD_NONNULL = find_nodes_no_escape find_nodes_GUARD_ISNULL = find_nodes_no_escape Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py Tue Sep 28 16:25:50 2010 @@ -3,6 +3,7 @@ from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap +from pypy.jit.metainterp.optimizeopt.string import OptString def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -13,6 +14,7 @@ optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), + OptString(), OptHeap(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) @@ -23,4 +25,3 @@ expect 'specnodes' on the bridge. 
""" optimize_loop_1(metainterp_sd, bridge, False) - Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Tue Sep 28 16:25:50 2010 @@ -12,6 +12,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.tool.pairtype import extendabletype LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -23,6 +24,7 @@ MININT = -sys.maxint - 1 class OptValue(object): + __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -126,6 +128,7 @@ def setitem(self, index, value): raise NotImplementedError + class ConstantValue(OptValue): def __init__(self, box): self.make_constant(box) @@ -134,6 +137,7 @@ CONST_1 = ConstInt(1) CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(ConstFloat(0.0)) +CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) @@ -249,6 +253,7 @@ return None def make_equal_to(self, box, value): + assert isinstance(value, OptValue) assert box not in self.values self.values[box] = value @@ -306,6 +311,9 @@ # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + def send_extra_operation(self, op): + self.first_optimization.propagate_forward(op) + def propagate_forward(self, op): self.producer[op.result] = op opnum = op.getopnum() Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Tue Sep 28 16:25:50 2010 @@ -140,6 +140,7 @@ args = op.getarglist()[1:] self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) + def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) if value.is_constant(): @@ -310,17 +311,17 @@ def optimize_PTR_EQ(self, op): self._optimize_oois_ooisnot(op, False) - def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.getarg(0)) - realclassbox = value.get_constant_class(self.optimizer.cpu) - if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr()) - result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, - realclassbox, - checkclassbox) - self.make_constant_int(op.result, result) - return - self.emit_operation(op) +## def optimize_INSTANCEOF(self, op): +## value = self.getvalue(op.args[0]) +## realclassbox = value.get_constant_class(self.optimizer.cpu) +## if realclassbox is not None: +## checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) +## result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, +## realclassbox, +## checkclassbox) +## self.make_constant_int(op.result, result) +## return +## self.emit_operation(op) optimize_ops = _findall(OptRewrite, 'optimize_') Copied: pypy/trunk/pypy/jit/metainterp/optimizeopt/string.py (from r77432, pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py) ============================================================================== --- 
pypy/branch/jit-str/pypy/jit/metainterp/optimizeopt/string.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/string.py Tue Sep 28 16:25:50 2010 @@ -277,7 +277,7 @@ # dispatch based on 'oopspecindex' to a method that handles # specifically the given oopspec call. For non-oopspec calls, # oopspecindex is just zero. - effectinfo = op.descr.get_extra_info() + effectinfo = op.getdescr().get_extra_info() if effectinfo is not None: oopspecindex = effectinfo.oopspecindex for value, meth in opt_call_oopspec_ops: @@ -287,11 +287,11 @@ self.emit_operation(op) def opt_call_oopspec_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[1]) - dest_value = self.getvalue(op.args[2]) - source_start_box = self.get_constant_box(op.args[3]) - dest_start_box = self.get_constant_box(op.args[4]) - length = self.get_constant_box(op.args[5]) + source_value = self.getvalue(op.getarg(1)) + dest_value = self.getvalue(op.getarg(2)) + source_start_box = self.get_constant_box(op.getarg(3)) + dest_start_box = self.get_constant_box(op.getarg(4)) + length = self.get_constant_box(op.getarg(5)) if (source_value.is_virtual() and source_start_box and dest_start_box and length and dest_value.is_virtual()): # XXX optimize the case where dest value is not virtual, @@ -307,11 +307,11 @@ return False def optimize_NEWSTR(self, op): - length_box = self.get_constant_box(op.args[0]) + length_box = self.get_constant_box(op.getarg(0)) if length_box: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): + if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEWSTR, [length_box], op.result) vvalue = self.make_vstring_plain(op.result, op) vvalue.setup(length_box.getint()) @@ -320,18 +320,18 @@ self.emit_operation(op) def optimize_STRSETITEM(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual() and isinstance(value, VStringPlainValue): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) return value.ensure_nonnull() self.emit_operation(op) def optimize_STRGETITEM(self, op): - value = self.getvalue(op.args[0]) - vindex = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + vindex = self.getvalue(op.getarg(1)) vresult = self.strgetitem(value, vindex) self.make_equal_to(op.result, vresult) @@ -354,13 +354,13 @@ return self.getvalue(resbox) def optimize_STRLEN(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) lengthbox = value.getstrlen(self.optimizer.newoperations) self.make_equal_to(op.result, self.getvalue(lengthbox)) def opt_call_oopspec_STR_CONCAT(self, op): - vleft = self.getvalue(op.args[1]) - vright = self.getvalue(op.args[2]) + vleft = self.getvalue(op.getarg(1)) + vright = self.getvalue(op.getarg(2)) vleft.ensure_nonnull() vright.ensure_nonnull() newoperations = self.optimizer.newoperations @@ -373,9 +373,9 @@ def opt_call_oopspec_STR_SLICE(self, op): newoperations = self.optimizer.newoperations - vstr = self.getvalue(op.args[1]) - vstart = self.getvalue(op.args[2]) - vstop = self.getvalue(op.args[3]) + vstr = self.getvalue(op.getarg(1)) + vstart = self.getvalue(op.getarg(2)) + vstop = self.getvalue(op.getarg(3)) # if (isinstance(vstr, VStringPlainValue) and vstart.is_constant() and vstop.is_constant()): @@ -403,8 +403,8 @@ 
return True def opt_call_oopspec_STR_EQUAL(self, op): - v1 = self.getvalue(op.args[1]) - v2 = self.getvalue(op.args[2]) + v1 = self.getvalue(op.getarg(1)) + v2 = self.getvalue(op.getarg(2)) # l1box = v1.getstrlen(None) l2box = v2.getstrlen(None) @@ -514,7 +514,7 @@ self.optimizer.newoperations.append(op) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py Tue Sep 28 16:25:50 2010 @@ -188,12 +188,12 @@ itemboxes.append(itemvalue.get_key_box()) modifier.register_virtual_fields(self.keybox, itemboxes) for itemvalue in self._items: - if itemvalue is not self.constvalue: - itemvalue.get_args_for_fail(modifier) + itemvalue.get_args_for_fail(modifier) def _make_virtual(self, modifier): return modifier.make_varray(self.arraydescr) + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -324,14 +324,13 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None, - descr = vrefinfo.descr_forced) - self.optimize_SETFIELD_GC(op1) + seo = self.optimizer.send_extra_operation + seo(ResOperation(rop.SETFIELD_GC, op.getarglist(), None, + descr = vrefinfo.descr_forced)) # - set 'virtual_token' to TOKEN_NONE args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)] - op1 = ResOperation(rop.SETFIELD_GC, args, None, - descr = vrefinfo.descr_virtual_token) - self.optimize_SETFIELD_GC(op1) + seo(ResOperation(rop.SETFIELD_GC, args, None, + descr = vrefinfo.descr_virtual_token)) # Note that in some cases the virtual in op.getarg(1) has been forced # already. This is fine. 
In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will @@ -358,8 +357,8 @@ def optimize_SETFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) if value.is_virtual(): + fieldvalue = self.getvalue(op.getarg(1)) value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() @@ -382,7 +381,7 @@ descr=op.getdescr()) self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: - ###self.optimize_default(op) + self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): @@ -421,30 +420,6 @@ ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) - def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.getarg(2)) - dest_value = self.getvalue(op.getarg(3)) - source_start_box = self.get_constant_box(op.getarg(4)) - dest_start_box = self.get_constant_box(op.getarg(5)) - length = self.get_constant_box(op.getarg(6)) - if (source_value.is_virtual() and source_start_box and dest_start_box - and length and dest_value.is_virtual()): - # XXX optimize the case where dest value is not virtual, - # but we still can avoid a mess - source_start = source_start_box.getint() - dest_start = dest_start_box.getint() - for index in range(length.getint()): - val = source_value.getitem(index + source_start) - dest_value.setitem(index + dest_start, val) - return - if length and length.getint() == 0: - return # 0-length arraycopy - descr = op.getarg(0) - assert isinstance(descr, AbstractDescr) - args = op.getarglist()[1:] - self.emit_operation(ResOperation(rop.CALL, args, op.result, - descr)) - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: Modified: pypy/trunk/pypy/jit/metainterp/optimizeutil.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeutil.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeutil.py Tue Sep 28 16:25:50 2010 @@ -14,6 +14,11 @@ def _findall(Class, name_prefix): result = [] + for name in dir(Class): + if name.startswith(name_prefix): + opname = name[len(name_prefix):] + if opname.isupper(): + assert hasattr(resoperation.rop, opname) for value, name in resoperation.opname.items(): if hasattr(Class, name_prefix + name): result.append((value, getattr(Class, name_prefix + name))) Modified: pypy/trunk/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/trunk/pypy/jit/metainterp/pyjitpl.py Tue Sep 28 16:25:50 2010 @@ -421,14 +421,6 @@ def opimpl_arraylen_gc(self, arraybox, arraydescr): return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox) - @arguments("descr", "box", "box", "box", "box", "box", "box", "descr") - def opimpl_arraycopy(self, calldescr, fnptr, sourcebox, destbox, - source_startbox, dest_startbox, lengthbox, - arraydescr): - self.execute_with_descr(rop.ARRAYCOPY, arraydescr, calldescr, fnptr, - sourcebox, destbox, source_startbox, - dest_startbox, lengthbox) - @arguments("orgpc", "box", "descr", "box") def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox): negbox = self.metainterp.execute_and_record( Modified: pypy/trunk/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/resoperation.py (original) +++ 
pypy/trunk/pypy/jit/metainterp/resoperation.py Tue Sep 28 16:25:50 2010 @@ -450,7 +450,6 @@ 'SETARRAYITEM_RAW/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', - 'ARRAYCOPY/7d', # removed before it's passed to the backend 'NEWSTR/1', 'STRSETITEM/3', 'UNICODESETITEM/3', @@ -459,6 +458,8 @@ 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend + 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length + 'COPYUNICODECONTENT/5', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- 'CALL/*d', Modified: pypy/trunk/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/resume.py (original) +++ pypy/trunk/pypy/jit/metainterp/resume.py Tue Sep 28 16:25:50 2010 @@ -4,10 +4,12 @@ from pypy.jit.metainterp.history import INT, REF, FLOAT, HOLE from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import jitprof -from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec +from pypy.jit.codewriter.effectinfo import funcptr_for_oopspec +from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr from pypy.rlib import rarithmetic from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.rlib.debug import have_debug_prints +from pypy.rlib.debug import have_debug_prints, ll_assert from pypy.rlib.debug import debug_start, debug_stop, debug_print # Logic to encode the chain of frames and the state of the boxes at a @@ -253,6 +255,15 @@ def make_varray(self, arraydescr): return VArrayInfo(arraydescr) + def make_vstrplain(self): + return VStrPlainInfo() + + def make_vstrconcat(self): + return VStrConcatInfo() + + def make_vstrslice(self): + return VStrSliceInfo() + def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) self.liveboxes[virtualbox] = tagged @@ -397,9 +408,7 @@ class AbstractVirtualInfo(object): - #def allocate(self, metainterp): - # raise NotImplementedError - #def setfields(self, decoder, struct): + #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): return tagged_list_eq(self.fieldnums, fieldnums) @@ -419,6 +428,7 @@ for i in range(len(self.fielddescrs)): descr = self.fielddescrs[i] decoder.setfield(descr, struct, self.fieldnums[i]) + return struct def debug_prints(self): assert len(self.fielddescrs) == len(self.fieldnums) @@ -433,8 +443,10 @@ self.known_class = known_class @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_with_vtable(self.known_class) + def allocate(self, decoder, index): + struct = decoder.allocate_with_vtable(self.known_class) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvirtualinfo", self.known_class.repr_rpython()) @@ -446,8 +458,10 @@ self.typedescr = typedescr @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_struct(self.typedescr) + def allocate(self, decoder, index): + struct = decoder.allocate_struct(self.typedescr) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvstructinfo", self.typedescr.repr_rpython()) @@ -459,14 +473,11 @@ #self.fieldnums = ... 
@specialize.argtype(1) - def allocate(self, decoder): + def allocate(self, decoder, index): length = len(self.fieldnums) - return decoder.allocate_array(self.arraydescr, length) - - @specialize.argtype(1) - def setfields(self, decoder, array): arraydescr = self.arraydescr - length = len(self.fieldnums) + array = decoder.allocate_array(arraydescr, length) + decoder.virtuals_cache[index] = array # NB. the check for the kind of array elements is moved out of the loop if arraydescr.is_array_of_pointers(): for i in range(length): @@ -480,12 +491,65 @@ for i in range(length): decoder.setarrayitem_int(arraydescr, array, i, self.fieldnums[i]) + return array def debug_prints(self): debug_print("\tvarrayinfo", self.arraydescr) for i in self.fieldnums: debug_print("\t\t", str(untag(i))) + +class VStrPlainInfo(AbstractVirtualInfo): + """Stands for the string made out of the characters of all fieldnums.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + length = len(self.fieldnums) + string = decoder.allocate_string(length) + decoder.virtuals_cache[index] = string + for i in range(length): + decoder.string_setitem(string, i, self.fieldnums[i]) + return string + + def debug_prints(self): + debug_print("\tvstrplaininfo length", len(self.fieldnums)) + + +class VStrConcatInfo(AbstractVirtualInfo): + """Stands for the string made out of the concatenation of two + other strings.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + # xxx for blackhole resuming, this will build all intermediate + # strings and throw them away immediately, which is a bit sub- + # efficient. Not sure we care. + left, right = self.fieldnums + string = decoder.concat_strings(left, right) + decoder.virtuals_cache[index] = string + return string + + def debug_prints(self): + debug_print("\tvstrconcatinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + + +class VStrSliceInfo(AbstractVirtualInfo): + """Stands for the string made out of slicing another string.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + largerstr, start, length = self.fieldnums + string = decoder.slice_string(largerstr, start, length) + decoder.virtuals_cache[index] = string + return string + + def debug_prints(self): + debug_print("\tvstrsliceinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + # ____________________________________________________________ class AbstractResumeDataReader(object): @@ -496,7 +560,8 @@ blackholing and want the best performance. """ _mixin_ = True - virtuals = None + rd_virtuals = None + virtuals_cache = None virtual_default = None def _init(self, cpu, storage): @@ -508,17 +573,29 @@ self._prepare_virtuals(storage.rd_virtuals) self._prepare_pendingfields(storage.rd_pendingfields) + def getvirtual(self, index): + # Returns the index'th virtual, building it lazily if needed. + # Note that this may be called recursively; that's why the + # allocate() methods must fill in the cache as soon as they + # have the object, before they fill its fields. 
+ v = self.virtuals_cache[index] + if not v: + v = self.rd_virtuals[index].allocate(self, index) + ll_assert(v == self.virtuals_cache[index], "resume.py: bad cache") + return v + + def force_all_virtuals(self): + rd_virtuals = self.rd_virtuals + if rd_virtuals: + for i in range(len(rd_virtuals)): + if rd_virtuals[i] is not None: + self.getvirtual(i) + return self.virtuals_cache + def _prepare_virtuals(self, virtuals): if virtuals: - self.virtuals = [self.virtual_default] * len(virtuals) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - self.virtuals[i] = vinfo.allocate(self) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - vinfo.setfields(self, self.virtuals[i]) + self.rd_virtuals = virtuals + self.virtuals_cache = [self.virtual_default] * len(virtuals) def _prepare_pendingfields(self, pendingfields): if pendingfields is not None: @@ -622,6 +699,32 @@ return self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, ConstInt(length)) + def allocate_string(self, length): + return self.metainterp.execute_and_record(rop.NEWSTR, + None, ConstInt(length)) + + def string_setitem(self, strbox, index, charnum): + charbox = self.decode_box(charnum, INT) + self.metainterp.execute_and_record(rop.STRSETITEM, None, + strbox, ConstInt(index), charbox) + + def concat_strings(self, str1num, str2num): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) + str1box = self.decode_box(str1num, REF) + str2box = self.decode_box(str2num, REF) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), str1box, str2box], calldescr) + + def slice_string(self, strnum, startnum, lengthnum): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_SLICE) + strbox = self.decode_box(strnum, REF) + startbox = self.decode_box(startnum, INT) + lengthbox = self.decode_box(lengthnum, INT) + stopbox = self.metainterp.execute_and_record(rop.INT_ADD, None, + startbox, lengthbox) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), strbox, startbox, stopbox], calldescr) + def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): kind = REF @@ -663,9 +766,7 @@ else: box = self.consts[num] elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - box = virtuals[num] + box = self.getvirtual(num) elif tag == TAGINT: box = ConstInt(num) else: @@ -750,7 +851,7 @@ resumereader.handling_async_forcing() vrefinfo = metainterp_sd.virtualref_info resumereader.consume_vref_and_vable(vrefinfo, vinfo) - return resumereader.virtuals + return resumereader.force_all_virtuals() class ResumeDataDirectReader(AbstractResumeDataReader): unique_id = lambda: None @@ -768,7 +869,9 @@ # special case for resuming after a GUARD_NOT_FORCED: we already # have the virtuals self.resume_after_guard_not_forced = 2 - self.virtuals = all_virtuals + self.virtuals_cache = all_virtuals + # self.rd_virtuals can remain None, because virtuals_cache is + # already filled def handling_async_forcing(self): self.resume_after_guard_not_forced = 1 @@ -839,6 +942,31 @@ def allocate_array(self, arraydescr, length): return self.cpu.bh_new_array(arraydescr, length) + def allocate_string(self, length): + return self.cpu.bh_newstr(length) + + def string_setitem(self, str, index, charnum): + char = self.decode_int(charnum) + self.cpu.bh_strsetitem(str, index, char) + + def concat_strings(self, str1num, str2num): + str1 = self.decode_ref(str1num) + str2 = self.decode_ref(str2num) + str1 = 
lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str1) + str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str2) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_CONCAT) + result = funcptr(str1, str2) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + + def slice_string(self, strnum, startnum, lengthnum): + str = self.decode_ref(strnum) + start = self.decode_int(startnum) + length = self.decode_int(lengthnum) + str = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_SLICE) + result = funcptr(str, start, start + length) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + def setfield(self, descr, struct, fieldnum): if descr.is_pointer_field(): newvalue = self.decode_ref(fieldnum) @@ -881,9 +1009,7 @@ return self.cpu.ts.NULLREF return self.consts[num].getref_base() elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - return virtuals[num] + return self.getvirtual(num) else: assert tag == TAGBOX if num < 0: Modified: pypy/trunk/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/trunk/pypy/jit/metainterp/simple_optimize.py Tue Sep 28 16:25:50 2010 @@ -9,16 +9,11 @@ def transform(op): from pypy.jit.metainterp.history import AbstractDescr - # change ARRAYCOPY to call, so we don't have to pass around - # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. - if op.getopnum() == rop.ARRAYCOPY: - descr = op.getarg(0) - assert isinstance(descr, AbstractDescr) - args = op.getarglist()[1:] - op = ResOperation(rop.CALL, args, op.result, descr=descr) - elif op.getopnum() == rop.CALL_PURE: - args = op.getarglist()[1:] - op = ResOperation(rop.CALL, args, op.result, op.getdescr()) + # Rename CALL_PURE to CALL. + # Simplify the VIRTUAL_REF_* so that they don't show up in the backend. 
+ if op.getopnum() == rop.CALL_PURE: + op = ResOperation(rop.CALL, op.getarglist()[1:], op.result, + op.getdescr()) elif op.getopnum() == rop.VIRTUAL_REF: op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) elif op.getopnum() == rop.VIRTUAL_REF_FINISH: Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/oparser.py Tue Sep 28 16:25:50 2010 @@ -5,13 +5,12 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ - LoopToken + LoopToken, get_const_ptr_for_string from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype -from pypy.rpython.annlowlevel import llstr class ParseError(Exception): pass @@ -157,8 +156,7 @@ if arg.startswith('"') or arg.startswith("'"): # XXX ootype info = arg.strip("'\"") - return ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, - llstr(info))) + return get_const_ptr_for_string(info) if arg.startswith('ConstClass('): name = arg[len('ConstClass('):-1] return self.get_const(name, 'class') Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizefindnode.py Tue Sep 28 16:25:50 2010 @@ -1,6 +1,6 @@ import py, random -from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -115,6 +115,36 @@ mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([nextdescr], [], [], EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE)) + arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY)) + strconcatdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT)) + slicedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE)) + strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL)) + streq_slice_checknull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHECKNULL)) + streq_slice_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_NONNULL)) + streq_slice_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHAR)) + streq_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL)) + streq_nonnull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL_CHAR)) + streq_checknull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + 
oopspecindex=EffectInfo.OS_STREQ_CHECKNULL_CHAR)) + streq_lengthok_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_LENGTHOK)) + class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Tue Sep 28 16:25:50 2010 @@ -3082,7 +3082,7 @@ setarrayitem_gc(p1, 1, 1, descr=arraydescr) p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p2, 1, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 1, descr=arraydescr) jump(i2) ''' @@ -3099,7 +3099,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 0, i0, descr=arraydescr) setarrayitem_gc(p2, 0, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 0, descr=arraydescr) jump(i2) ''' @@ -3116,7 +3116,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 2, 10, descr=arraydescr) setarrayitem_gc(p2, 2, 13, descr=arraydescr) - arraycopy(0, 0, p1, p2, 0, 0, 3, descr=arraydescr) + call(0, p1, p2, 0, 0, 3, descr=arraycopydescr) jump(p2) ''' expected = ''' @@ -3133,7 +3133,7 @@ ops = ''' [p1] p0 = new_array(0, descr=arraydescr) - arraycopy(0, 0, p0, p1, 0, 0, 0, descr=arraydescr) + call(0, p0, p1, 0, 0, 0, descr=arraycopydescr) jump(p1) ''' expected = ''' @@ -3893,7 +3893,606 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_newstr_1(self): + ops = """ + [i0] + p1 = newstr(1) + strsetitem(p1, 0, i0) + i1 = strgetitem(p1, 0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_newstr_2(self): + ops = """ + [i0, i1] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + i2 = strgetitem(p1, 1) + i3 = strgetitem(p1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + jump(i1, i0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_concat_1(self): + ops = """ + [p1, p2] + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(p2, p3) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p3 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p3, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p3, 0, i4, i5) + jump(p2, p3) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_concat_vstr2_str(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(2, i2) + p3 = newstr(i3) + strsetitem(p3, 0, i0) + strsetitem(p3, 1, i1) + i4 = strlen(p2) + i5 = int_add(2, i4) # will be killed by the backend + copystrcontent(p2, p3, 0, 2, i4) + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_vstr2(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p2, p1, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(i2, 2) + p3 = newstr(i3) + i4 = 
strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, i0) + i5 = int_add(i4, 1) + strsetitem(p3, i5, i1) + i6 = int_add(i5, 1) # will be killed by the backend + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_str_str(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + p5 = call(0, p4, p3, descr=strconcatdescr) + jump(p2, p3, p5) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i12 = int_add(i1, i2) + i3 = strlen(p3) + i123 = int_add(i12, i3) + p5 = newstr(i123) + i1b = strlen(p1) + copystrcontent(p1, p5, 0, 0, i1b) + i2b = strlen(p2) + i12b = int_add(i1b, i2b) + copystrcontent(p2, p5, 0, i1b, i2b) + i3b = strlen(p3) + i123b = int_add(i12b, i3b) # will be killed by the backend + copystrcontent(p3, p5, 0, i12b, i3b) + jump(p2, p3, p5) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_cstr1(self): + ops = """ + [p2] + p3 = call(0, p2, "x", descr=strconcatdescr) + jump(p3) + """ + expected = """ + [p2] + i2 = strlen(p2) + i3 = int_add(i2, 1) + p3 = newstr(i3) + i4 = strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, 120) # == ord('x') + i5 = int_add(i4, 1) # will be killed by the backend + jump(p3) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_str_concat_consts(self): + ops = """ + [] + p1 = same_as("ab") + p2 = same_as("cde") + p3 = call(0, p1, p2, descr=strconcatdescr) + escape(p3) + jump() + """ + expected = """ + [] + escape("abcde") + jump() + """ + self.optimize_loop(ops, '', expected) + + def test_str_slice_1(self): + ops = """ + [p1, i1, i2] + p2 = call(0, p1, i1, i2, descr=slicedescr) + jump(p2, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + p2 = newstr(i3) + copystrcontent(p1, p2, i1, 0, i3) + jump(p2, i1, i2) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_slice_2(self): + ops = """ + [p1, i2] + p2 = call(0, p1, 0, i2, descr=slicedescr) + jump(p2, i2) + """ + expected = """ + [p1, i2] + p2 = newstr(i2) + copystrcontent(p1, p2, 0, 0, i2) + jump(p2, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_slice_3(self): + ops = """ + [p1, i1, i2, i3, i4] + p2 = call(0, p1, i1, i2, descr=slicedescr) + p3 = call(0, p2, i3, i4, descr=slicedescr) + jump(p3, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] + i0 = int_sub(i2, i1) # killed by the backend + i5 = int_sub(i4, i3) + i6 = int_add(i1, i3) + p3 = newstr(i5) + copystrcontent(p1, p3, i6, 0, i5) + jump(p3, i1, i2, i3, i4) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected) + + def test_str_slice_getitem1(self): + ops = """ + [p1, i1, i2, i3] + p2 = call(0, p1, i1, i2, descr=slicedescr) + i4 = strgetitem(p2, i3) + escape(i4) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i6 = int_sub(i2, i1) # killed by the backend + i5 = int_add(i1, i3) + i4 = strgetitem(p1, i5) + escape(i4) + jump(p1, i1, i2, i3) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + + def test_str_slice_plain(self): + ops = """ + [i3, i4] + p1 = newstr(2) + strsetitem(p1, 0, i3) + strsetitem(p1, 1, i4) + p2 = call(0, p1, 1, 2, descr=slicedescr) + i5 = strgetitem(p2, 0) + escape(i5) + jump(i3, i4) + """ + expected = """ + [i3, i4] + escape(i4) + jump(i3, i4) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_slice_concat(self): + ops = """ + [p1, i1, i2, p2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + p4 = call(0, p3, p2, 
descr=strconcatdescr) + jump(p4, i1, i2, p2) + """ + expected = """ + [p1, i1, i2, p2] + i3 = int_sub(i2, i1) # length of p3 + i4 = strlen(p2) + i5 = int_add(i3, i4) + p4 = newstr(i5) + copystrcontent(p1, p4, i1, 0, i3) + i4b = strlen(p2) + i6 = int_add(i3, i4b) # killed by the backend + copystrcontent(p2, p4, 0, i3, i4b) + jump(p4, i1, i2, p2) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + + # ---------- + def optimize_loop_extradescrs(self, ops, spectext, optops): + from pypy.jit.metainterp.optimizeopt import string + def my_callinfo_for_oopspec(oopspecindex): + calldescrtype = type(LLtypeMixin.strequaldescr) + for value in LLtypeMixin.__dict__.values(): + if isinstance(value, calldescrtype): + if (value.get_extra_info() and + value.get_extra_info().oopspecindex == oopspecindex): + # returns 0 for 'func' in this test + return value, 0 + raise AssertionError("not found: oopspecindex=%d" % oopspecindex) + # + saved = string.callinfo_for_oopspec + try: + string.callinfo_for_oopspec = my_callinfo_for_oopspec + self.optimize_loop(ops, spectext, optops) + finally: + string.callinfo_for_oopspec = saved + + def test_str_equal_noop1(self): + ops = """ + [p1, p2] + i0 = call(0, p1, p2, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', ops) + + def test_str_equal_noop2(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, p2, p3) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, p2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice1(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p4, p3, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i3 = int_sub(i2, i1) + i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice2(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice3(self): + ops = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice4(self): + ops = """ + [p1, i1, i2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, "x", descr=strequaldescr) + escape(i0) + jump(p1, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + i0 = call(0, p1, 
i1, i3, 120, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice5(self): + ops = """ + [p1, i1, i2, i3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + p5 = newstr(1) + strsetitem(p5, 0, i3) + i0 = call(0, p5, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2, i3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_none1(self): + ops = """ + [p1] + i0 = call(0, p1, NULL, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_none2(self): + ops = """ + [p1] + i0 = call(0, NULL, p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull1(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=streq_nonnull_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull2(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i1 = strlen(p1) + i0 = int_eq(i1, 0) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull3(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, 120, descr=streq_nonnull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull4(self): + ops = """ + [p1, p2] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, "hello world", p4, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, "hello world", p4, descr=streq_nonnull_descr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars0(self): + ops = """ + [i1] + p1 = newstr(0) + i0 = call(0, p1, "", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(1) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + i0 = int_eq(i1, 120) # ord('x') + escape(i0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars2(self): + ops = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=strequaldescr) + escape(i0) + 
jump(i1, i2) + """ + expected = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=streq_lengthok_descr) + escape(i0) + jump(i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars3(self): + ops = """ + [p1] + i0 = call(0, "x", p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = call(0, p1, 120, descr=streq_checknull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_lengthmismatch1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, "xy", p1, descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + # XXX unicode operations + # XXX str2unicode ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): Modified: pypy/trunk/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_resume.py Tue Sep 28 16:25:50 2010 @@ -199,10 +199,10 @@ def test_prepare_virtuals(): class FakeVinfo(object): - def allocate(self, decoder): - return "allocated" - def setfields(self, decoder, virtual): - assert virtual == "allocated" + def allocate(self, decoder, index): + s = "allocated" + decoder.virtuals_cache[index] = s + return s class FakeStorage(object): rd_virtuals = [FakeVinfo(), None] rd_numb = [] @@ -212,7 +212,97 @@ _already_allocated_resume_virtuals = None cpu = None reader = ResumeDataDirectReader(None, FakeStorage()) - assert reader.virtuals == ["allocated", reader.virtual_default] + assert reader.force_all_virtuals() == ["allocated", reader.virtual_default] + +# ____________________________________________________________ + +class FakeResumeDataReader(AbstractResumeDataReader): + def allocate_with_vtable(self, known_class): + return FakeBuiltObject(vtable=known_class) + def allocate_struct(self, typedescr): + return FakeBuiltObject(typedescr=typedescr) + def allocate_array(self, arraydescr, length): + return FakeBuiltObject(arraydescr=arraydescr, items=[None]*length) + def setfield(self, descr, struct, fieldnum): + setattr(struct, descr, fieldnum) + def setarrayitem_int(self, arraydescr, array, i, fieldnum): + assert 0 <= i < len(array.items) + assert arraydescr is array.arraydescr + array.items[i] = fieldnum + def allocate_string(self, length): + return FakeBuiltObject(string=[None]*length) + def string_setitem(self, string, i, fieldnum): + value, tag = untag(fieldnum) + assert tag == TAGINT + assert 0 <= i < len(string.string) + string.string[i] = value + def concat_strings(self, left, right): + return FakeBuiltObject(strconcat=[left, right]) + def slice_string(self, str, start, length): + return FakeBuiltObject(strslice=[str, start, length]) + +class FakeBuiltObject(object): + def __init__(self, **kwds): + self.__dict__ = kwds + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + def __repr__(self): + return 'FakeBuiltObject(%s)' % ( + ', '.join(['%s=%r' % item for item in self.__dict__.items()])) + +class FakeArrayDescr(object): + def is_array_of_pointers(self): return False + def is_array_of_floats(self): return False + +def test_virtualinfo(): + info = VirtualInfo(123, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = 
FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(vtable=123, fielddescr1=tag(456, TAGINT))] + +def test_vstructinfo(): + info = VStructInfo(124, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(typedescr=124, fielddescr1=tag(456, TAGINT))] + +def test_varrayinfo(): + arraydescr = FakeArrayDescr() + info = VArrayInfo(arraydescr) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(arraydescr=arraydescr, items=[tag(456, TAGINT)])] + +def test_vstrplaininfo(): + info = VStrPlainInfo() + info.fieldnums = [tag(60, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(string=[60])] + +def test_vstrconcatinfo(): + info = VStrConcatInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strconcat=info.fieldnums)] + +def test_vstrsliceinfo(): + info = VStrSliceInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX), tag(30, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strslice=info.fieldnums)] # ____________________________________________________________ @@ -957,7 +1047,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 2 + assert len(reader.virtuals_cache) == 2 b2t = reader.decode_ref(modifier._gettagged(b2s)) b4t = reader.decode_ref(modifier._gettagged(b4s)) trace = metainterp.trace @@ -973,9 +1063,9 @@ (rop.SETFIELD_GC, [b4t, b3t], None, LLtypeMixin.valuedescr), (rop.SETFIELD_GC, [b4t, b5t], None, LLtypeMixin.otherdescr)] if untag(modifier._gettagged(b2s))[0] == -2: - expected = [b2new, b4new] + b2set + b4set + expected = [b2new, b4new] + b4set + b2set else: - expected = [b4new, b2new] + b4set + b2set + expected = [b4new, b2new] + b2set + b4set for x, y in zip(expected, trace): assert x == y @@ -1020,7 +1110,7 @@ # resume metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace expected = [ @@ -1065,7 +1155,7 @@ NULL = ConstPtr.value metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace @@ -1112,7 +1202,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert reader.virtuals is None + assert reader.virtuals_cache is None trace = metainterp.trace b2set = (rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr) expected = [b2set] Modified: pypy/trunk/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_string.py Tue Sep 28 16:25:50 2010 @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import 
JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin @@ -72,6 +72,234 @@ res = self.meta_interp(f, [6, 10]) assert res == 6 + def test_char2string_pure(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n']) + @dont_look_inside + def escape(x): + pass + def f(n): + while n > 0: + jitdriver.can_enter_jit(n=n) + jitdriver.jit_merge_point(n=n) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + if n > 100: + escape(s) + n -= 1 + return 42 + self.meta_interp(f, [6]) + self.check_loops(newstr=0, strsetitem=0, strlen=0, + newunicode=0, unicodesetitem=0, unicodelen=0) + + def test_char2string_escape(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n', 'total']) + @dont_look_inside + def escape(x): + return ord(x[0]) + def f(n): + total = 0 + while n > 0: + jitdriver.can_enter_jit(n=n, total=total) + jitdriver.jit_merge_point(n=n, total=total) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + total += escape(s) + n -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + + def test_char2string2char(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['m', 'total']) + def f(m): + total = 0 + while m > 0: + jitdriver.can_enter_jit(m=m, total=total) + jitdriver.jit_merge_point(m=m, total=total) + string = dochr(m) + if m > 100: + string += string # forces to be a string + # read back the character + c = string[0] + total += ord(c) + m -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + newunicode=0, unicodegetitem=0, unicodesetitem=0, + unicodelen=0) + + def test_strconcat_pure(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + if m > 100: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0, + call=0, call_pure=0) + + def test_strconcat_escape_str_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) 
for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, + call=1, call_pure=0) # escape + + def test_strconcat_guard_fail(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(12)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + if m & 1: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 10]) + + def test_strslice(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = "foobarbazetc"[m:n] + if m <= 5: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [10, 10]) + + def test_streq_char(self): + for somestr in ["?abcdefg", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = somestr[:m] + escape(s == "?") + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, newunicode=0) + + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" CALL_PURE = "oosend_pure" Modified: pypy/trunk/pypy/jit/metainterp/test/test_ztranslation.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_ztranslation.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_ztranslation.py Tue Sep 28 16:25:50 2010 @@ -21,6 +21,7 @@ # - full optimizer # - jitdriver hooks # - two JITs + # - string concatenation, slicing and comparison class Frame(object): _virtualizable2_ = ['i'] @@ -60,11 +61,15 @@ frame.i -= 1 return total * 10 # - myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x']) + myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x', 's']) def f2(g, m, x): + s = "" while m > 0: - myjitdriver2.can_enter_jit(g=g, m=m, x=x) - myjitdriver2.jit_merge_point(g=g, m=m, x=x) + myjitdriver2.can_enter_jit(g=g, m=m, x=x, s=s) + myjitdriver2.jit_merge_point(g=g, m=m, x=x, s=s) + s += 'xy' + if s[:2] == 'yz': + return -666 m -= 1 x += 3 return x Modified: 
pypy/trunk/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/warmstate.py (original) +++ pypy/trunk/pypy/jit/metainterp/warmstate.py Tue Sep 28 16:25:50 2010 @@ -83,6 +83,9 @@ return history.ConstFloat(value) else: return history.BoxFloat(value) + elif isinstance(value, str) or isinstance(value, unicode): + assert len(value) == 1 # must be a character + value = ord(value) else: value = intmask(value) if in_const_box: Modified: pypy/trunk/pypy/objspace/std/stringtype.py ============================================================================== --- pypy/trunk/pypy/objspace/std/stringtype.py (original) +++ pypy/trunk/pypy/objspace/std/stringtype.py Tue Sep 28 16:25:50 2010 @@ -4,6 +4,7 @@ from sys import maxint from pypy.rlib.objectmodel import specialize +from pypy.rlib.jit import we_are_jitted def wrapstr(space, s): from pypy.objspace.std.stringobject import W_StringObject @@ -32,7 +33,7 @@ def wrapchar(space, c): from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.ropeobject import rope, W_RopeObject - if space.config.objspace.std.withprebuiltchar: + if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): if space.config.objspace.std.withrope: return W_RopeObject.PREBUILT[ord(c)] return W_StringObject.PREBUILT[ord(c)] Modified: pypy/trunk/pypy/rpython/annlowlevel.py ============================================================================== --- pypy/trunk/pypy/rpython/annlowlevel.py (original) +++ pypy/trunk/pypy/rpython/annlowlevel.py Tue Sep 28 16:25:50 2010 @@ -397,6 +397,8 @@ assert strtype in (str, unicode) def hlstr(ll_s): + if not ll_s: + return None if hasattr(ll_s, 'chars'): if strtype is str: return ''.join(ll_s.chars) @@ -423,9 +425,14 @@ def llstr(s): from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode + from pypy.rpython.lltypesystem.rstr import STR, UNICODE if strtype is str: + if s is None: + return lltype.nullptr(STR) ll_s = mallocstr(len(s)) else: + if s is None: + return lltype.nullptr(UNICODE) ll_s = mallocunicode(len(s)) for i, c in enumerate(s): ll_s.chars[i] = c Modified: pypy/trunk/pypy/rpython/lltypesystem/rlist.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/rlist.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/rlist.py Tue Sep 28 16:25:50 2010 @@ -159,7 +159,6 @@ if 'item_repr' not in self.__dict__: self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer()) if isinstance(self.LIST, GcForwardReference): - ITEM = self.item_repr.lowleveltype ITEMARRAY = self.get_itemarray_lowleveltype() self.LIST.become(ITEMARRAY) Modified: pypy/trunk/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/rstr.py Tue Sep 28 16:25:50 2010 @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated from pypy.rlib.objectmodel import _hash_string, enforceargs from pypy.rlib.debug import ll_assert -from pypy.rlib.jit import purefunction +from pypy.rlib.jit import purefunction, we_are_jitted from pypy.rpython.robject import PyObjRepr, pyobj_repr from pypy.rpython.rmodel import inputconst, IntegerRepr from pypy.rpython.rstr import AbstractStringRepr,AbstractCharRepr,\ @@ -65,8 +65,8 @@ dst = llmemory.cast_ptr_to_adr(dst) + 
_str_ofs(dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) copy_string_contents._always_inline_ = True - copy_string_contents.oopspec = ( - '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) + #copy_string_contents.oopspec = ( + # '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') @@ -326,6 +326,7 @@ s1.copy_contents(s1, newstr, 0, 0, len1) s1.copy_contents(s2, newstr, 0, len1, len2) return newstr + ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' @purefunction def ll_strip(s, ch, left, right): @@ -443,8 +444,8 @@ if chars1[j] != chars2[j]: return False j += 1 - return True + ll_streq.oopspec = 'stroruni.equal(s1, s2)' @purefunction def ll_startswith(s1, s2): @@ -694,35 +695,32 @@ return result @purefunction - def ll_stringslice_startonly(s1, start): - len1 = len(s1.chars) - newstr = s1.malloc(len1 - start) - lgt = len1 - start - assert lgt >= 0 + def _ll_stringslice(s1, start, stop): + lgt = stop - start assert start >= 0 + assert lgt >= 0 + newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' + + def ll_stringslice_startonly(s1, start): + return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) - @purefunction def ll_stringslice_startstop(s1, start, stop): - if stop >= len(s1.chars): - if start == 0: - return s1 - stop = len(s1.chars) - newstr = s1.malloc(stop - start) - assert start >= 0 - lgt = stop - start - assert lgt >= 0 - s1.copy_contents(s1, newstr, start, 0, lgt) - return newstr + if we_are_jitted(): + if stop > len(s1.chars): + stop = len(s1.chars) + else: + if stop >= len(s1.chars): + if start == 0: + return s1 + stop = len(s1.chars) + return LLHelpers._ll_stringslice(s1, start, stop) - @purefunction def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 - newstr = s1.malloc(newlen) - assert newlen >= 0 - s1.copy_contents(s1, newstr, 0, 0, newlen) - return newstr + return LLHelpers._ll_stringslice(s1, 0, newlen) def ll_split_chr(LIST, s, c): chars = s.chars From arigo at codespeak.net Tue Sep 28 16:27:31 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 28 Sep 2010 16:27:31 +0200 (CEST) Subject: [pypy-svn] r77453 - pypy/trunk/pypy/jit/metainterp/test Message-ID: <20100928142731.203F7282C03@codespeak.net> Author: arigo Date: Tue Sep 28 16:27:29 2010 New Revision: 77453 Modified: pypy/trunk/pypy/jit/metainterp/test/test_resume.py Log: Fix this test by removing the random order dependency. Modified: pypy/trunk/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_resume.py Tue Sep 28 16:27:29 2010 @@ -1062,13 +1062,14 @@ b4set = [(rop.SETFIELD_GC, [b4t, b2t], None, LLtypeMixin.nextdescr), (rop.SETFIELD_GC, [b4t, b3t], None, LLtypeMixin.valuedescr), (rop.SETFIELD_GC, [b4t, b5t], None, LLtypeMixin.otherdescr)] - if untag(modifier._gettagged(b2s))[0] == -2: - expected = [b2new, b4new] + b4set + b2set - else: - expected = [b4new, b2new] + b2set + b4set - - for x, y in zip(expected, trace): - assert x == y + expected = [b2new, b4new] + b4set + b2set + + # check that we get the operations in 'expected', in a possibly different + # order. 
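The pattern being introduced here (compare the trace against 'expected' as a multiset instead of relying on a fixed order) restated as a standalone helper; the helper name is invented for illustration:

def assert_same_operations(trace, expected):
    # order-insensitive comparison: each expected operation must occur
    # exactly once in the trace, in any order
    remaining = list(expected)
    assert len(trace) == len(remaining)
    for op in trace:
        assert op in remaining
        remaining.remove(op)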
+ assert len(trace) == len(expected) + for x in trace: + assert x in expected + expected.remove(x) ptr = b2t.value._obj.container._as_ptr() assert lltype.typeOf(ptr) == lltype.Ptr(LLtypeMixin.NODE) assert ptr.value == 111 From afa at codespeak.net Tue Sep 28 16:28:03 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 16:28:03 +0200 (CEST) Subject: [pypy-svn] r77454 - in pypy/branch/fast-forward/pypy/objspace/std: . test Message-ID: <20100928142803.1E0F2282BFB@codespeak.net> Author: afa Date: Tue Sep 28 16:28:01 2010 New Revision: 77454 Added: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py Log: First implementation of a bytearray object. For now, it's a non mutable array of chars... Added: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Tue Sep 28 16:28:01 2010 @@ -0,0 +1,192 @@ +from pypy.interpreter.error import OperationError +from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.inttype import wrapint +from pypy.objspace.std.multimethod import FailedToImplement +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rstring import StringBuilder +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.objspace.std import slicetype +from pypy.interpreter import gateway + +class W_BytearrayObject(W_Object): + from pypy.objspace.std.bytearraytype import bytearray_typedef as typedef + + def __init__(w_self, data): + w_self.data = list(data) + + def __repr__(w_self): + """ representation for debugging purposes """ + return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) + + def unwrap(w_bytearray, space): + return bytearray(w_self.data) + +registerimplementation(W_BytearrayObject) + + +def len__Bytearray(space, w_bytearray): + result = len(w_bytearray.data) + return wrapint(space, result) + +def getitem__Bytearray_ANY(space, w_bytearray, w_index): + # getindex_w should get a second argument space.w_IndexError, + # but that doesn't exist the first time this is called. 
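For orientation, the behaviour that this getitem multimethod and its companions implement, written as plain assertions; they mirror the app-level tests added later in this same commit:

b = bytearray('test')
assert b[0] == ord('t')               # item access yields an integer
assert b[1:3] == bytearray('es')      # slicing yields a new bytearray
try:
    b[4]
except IndexError:
    pass
else:
    assert False, "expected IndexError"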
+ try: + w_IndexError = space.w_IndexError + except AttributeError: + w_IndexError = None + index = space.getindex_w(w_index, w_IndexError, "bytearray index") + try: + return space.newint(ord(w_bytearray.data[index])) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + +def getitem__Bytearray_Slice(space, w_bytearray, w_slice): + data = w_bytearray.data + length = len(data) + start, stop, step, slicelength = w_slice.indices4(space, length) + assert slicelength >= 0 + return W_BytearrayObject(data[start:stop:step]) + +def getslice__Bytearray_ANY_ANY(space, w_bytearray, w_start, w_stop): + length = len(w_bytearray.data) + start, stop = normalize_simple_slice(space, length, w_start, w_stop) + return W_BytearrayObject(w_bytearray.data[start:stop]) + +def contains__Bytearray_Int(space, w_bytearray, w_char): + char = w_char.intval + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in w_bytearray.data: + if ord(c) == char: + return space.w_True + return space.w_False + +def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): + data1 = w_bytearray1.data + data2 = w_bytearray2.data + return W_BytearrayObject(data1 + data2) + +def mul_bytearray_times(space, w_bytearray, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_bytearray) == space.w_bytearray: + return w_bytearray + data = w_bytearray.data + return W_BytearrayObject(data * times) + +def mul__Bytearray_ANY(space, w_bytearray, w_times): + return mul_bytearray_times(space, w_bytearray, w_times) + +def mul__ANY_Bytearray(space, w_times, w_bytearray): + return mul_bytearray_times(space, w_bytearray, w_times) + +def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): + data1 = w_bytearray1.data + data2 = w_bytearray2.data + if len(data1) != len(data2): + return space.w_False + for i in range(len(data1)): + if data1[i] != data2[i]: + return space.w_False + return space.w_True + +def _min(a, b): + if a < b: + return a + return b + +def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): + data1 = w_bytearray1.data + data2 = w_bytearray2.data + ncmp = _min(len(data1), len(data2)) + # Search for the first index where items are different + for p in range(ncmp): + if data1[p] != data2[p]: + return space.newbool(data1[p] < data2[p]) + # No more items to compare -- compare sizes + return space.newbool(len(data1) < len(data2)) + +def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): + data1 = w_bytearray1.data + data2 = w_bytearray2.data + ncmp = _min(len(data1), len(data2)) + # Search for the first index where items are different + for p in range(ncmp): + if data1[p] != data2[p]: + return space.newbool(data1[p] > data2[p]) + # No more items to compare -- compare sizes + return space.newbool(len(data1) > len(data2)) + +# Mostly copied from repr__String, but without the "smart quote" +# functionality. 
+def repr__Bytearray(space, w_bytearray): + s = w_bytearray.data + + buf = StringBuilder(50) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + +def getnewargs__Bytearray(space, w_bytearray): + return space.newbytearray([W_BytearrayObject(w_bytearray.wrappeditems)]) + +def bytearray_count__Bytearray_ANY(space, w_bytearray, w_obj): + count = 0 + for w_item in w_bytearray.wrappeditems: + if space.eq_w(w_item, w_obj): + count += 1 + return space.wrap(count) + +def bytearray_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_obj, w_start, w_stop): + start = slicetype._Eval_SliceIndex(space, w_start) + stop = slicetype._Eval_SliceIndex(space, w_stop) + length = len(w_bytearray.wrappeditems) + if start < 0: + start += length + if start < 0: + start = 0 + if stop < 0: + stop += length + if stop < 0: + stop = 0 + for i in range(start, min(stop, length)): + w_item = w_bytearray.wrappeditems[i] + if space.eq_w(w_item, w_obj): + return space.wrap(i) + raise OperationError(space.w_ValueError, + space.wrap("bytearray.index(x): x not in bytearray")) + +from pypy.objspace.std import bytearraytype +register_all(vars(), bytearraytype) Added: pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py Tue Sep 28 16:28:01 2010 @@ -0,0 +1,41 @@ +import sys +from pypy.interpreter import gateway +from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.stdtypedef import StdTypeDef, SMM, no_hash_descr + + +bytearray_count = SMM( + "count", 2, + doc="B.count(sub [,start [,end]]) -> int\n" + "Return the number of non-overlapping occurrences of subsection sub in\n" + "bytes B[start:end]. 
Optional arguments start and end are interpreted\n" + "as in slice notation.") + +bytearray_index = SMM("index", 4, defaults=(0, sys.maxint), + doc="index(obj, [start, [stop]]) -> first index that obj " + "appears in the bytearray") + + at gateway.unwrap_spec(ObjSpace, W_Root, W_Root, W_Root, W_Root) +def descr__new__(space, w_bytearraytype, + w_source='', w_encoding=None, w_errors=None): + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if w_source is None: + data = [] + else: + data = space.str_w(w_source) + w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) + W_BytearrayObject.__init__(w_obj, data) + return w_obj + +# ____________________________________________________________ + +bytearray_typedef = StdTypeDef("bytearray", + __doc__ = '''bytearray() -> an empty bytearray +bytearray(sequence) -> bytearray initialized from sequence\'s items + +If the argument is a bytearray, the return value is the same object.''', + __new__ = gateway.interp2app(descr__new__), + __hash__ = no_hash_descr, + ) +bytearray_typedef.registermethods(globals()) Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/model.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/model.py Tue Sep 28 16:28:01 2010 @@ -47,6 +47,7 @@ from pypy.objspace.std.dicttype import dict_typedef from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.stringtype import str_typedef + from pypy.objspace.std.bytearraytype import bytearray_typedef from pypy.objspace.std.typetype import type_typedef from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.longtype import long_typedef @@ -71,6 +72,7 @@ from pypy.objspace.std import listobject from pypy.objspace.std import dictmultiobject from pypy.objspace.std import stringobject + from pypy.objspace.std import bytearrayobject from pypy.objspace.std import ropeobject from pypy.objspace.std import ropeunicodeobject from pypy.objspace.std import strsliceobject @@ -102,6 +104,7 @@ dictmultiobject.W_DictMultiObject: [], dictmultiobject.W_DictMultiIterObject: [], stringobject.W_StringObject: [], + bytearrayobject.W_BytearrayObject: [], typeobject.W_TypeObject: [], sliceobject.W_SliceObject: [], longobject.W_LongObject: [], Added: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py ============================================================================== --- (empty file) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Tue Sep 28 16:28:01 2010 @@ -0,0 +1,43 @@ + +class AppTestBytesArray: + def test_basics(self): + b = bytearray() + assert type(b) is bytearray + assert b.__class__ is bytearray + + def test_len(self): + b = bytearray('test') + assert len(b) == 4 + + def test_nohash(self): + raises(TypeError, hash, bytearray()) + + def test_repr(self): + assert repr(bytearray()) == "bytearray(b'')" + assert repr(bytearray('test')) == "bytearray(b'test')" + assert repr(bytearray("d'oh")) == r"bytearray(b'd\'oh')" + + def test_getitem(self): + b = bytearray('test') + assert b[0] == ord('t') + assert b[2] == ord('s') + raises(IndexError, b.__getitem__, 4) + assert b[1:5] == bytearray('est') + assert b[slice(1,5)] == bytearray('est') + + def test_arithmetic(self): + b1 = bytearray('hello ') + b2 = bytearray('world') + assert b1 + b2 == bytearray('hello world') + assert b1 * 2 == bytearray('hello hello ') + + def test_contains(self): + assert 
ord('l') in bytearray('hello') + + def test_iter(self): + assert list(bytearray('hello')) == [104, 101, 108, 108, 111] + + def test_compare(self): + assert bytearray('hello') < bytearray('world') + assert bytearray('world') > bytearray('hello') + From arigo at codespeak.net Tue Sep 28 16:42:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 28 Sep 2010 16:42:44 +0200 (CEST) Subject: [pypy-svn] r77455 - pypy/branch/minimark-free Message-ID: <20100928144244.34BAC282BFB@codespeak.net> Author: arigo Date: Tue Sep 28 16:42:42 2010 New Revision: 77455 Added: pypy/branch/minimark-free/ - copied from r77454, pypy/trunk/ Log: A branch in which to mimic a part of obmalloc.c that I ignored so far: freeing memory if the memory pressure goes down. From antocuni at codespeak.net Tue Sep 28 17:02:01 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 17:02:01 +0200 (CEST) Subject: [pypy-svn] r77456 - pypy/trunk/pypy/jit/metainterp/test Message-ID: <20100928150201.A88E3282BDD@codespeak.net> Author: antocuni Date: Tue Sep 28 17:02:00 2010 New Revision: 77456 Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py pypy/trunk/pypy/jit/metainterp/test/test_oparser.py Log: make oparser optionally less strict about what it can parse. This is needed to parse non-optimized loops from the logs Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/oparser.py Tue Sep 28 17:02:00 2010 @@ -63,7 +63,8 @@ class OpParser(object): def __init__(self, input, cpu, namespace, type_system, boxkinds, - invent_fail_descr=default_fail_descr): + invent_fail_descr=default_fail_descr, + nonstrict=False): self.input = input self.vars = {} self.cpu = cpu @@ -75,6 +76,7 @@ else: self._cache = {} self.invent_fail_descr = invent_fail_descr + self.nonstrict = nonstrict self.looptoken = LoopToken() def get_const(self, name, typ): @@ -133,11 +135,14 @@ vars = [] for elem in elements: elem = elem.strip() - box = self.box_for_var(elem) - vars.append(box) - self.vars[elem] = box + vars.append(self.newvar(elem)) return vars + def newvar(self, elem): + box = self.box_for_var(elem) + self.vars[elem] = box + return box + def is_float(self, arg): try: float(arg) @@ -170,6 +175,8 @@ elif arg.startswith('ConstPtr('): name = arg[len('ConstPtr('):-1] return self.get_const(name, 'ptr') + if arg not in self.vars and self.nonstrict: + self.newvar(arg) return self.vars[arg] def parse_op(self, line): @@ -210,7 +217,7 @@ if rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST: i = line.find('[', endnum) + 1 j = line.find(']', i) - if i <= 0 or j <= 0: + if (i <= 0 or j <= 0) and not self.nonstrict: raise ParseError("missing fail_args for guard operation") fail_args = [] if i < j: @@ -276,11 +283,14 @@ lines = self.input.splitlines() ops = [] newlines = [] + first_comment = None for line in lines: # for simplicity comments are not allowed on # debug_merge_point lines if '#' in line and 'debug_merge_point(' not in line: if line.lstrip()[0] == '#': # comment only + if first_comment is None: + first_comment = line continue comm = line.rfind('#') rpar = line.find(')') # assume there's a op(...) 
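Taken together, the nonstrict changes in this file let the parser digest loops as they appear in the jit-log-noopt- sections of a log: the input-argument header and guard fail_args may be missing, and variables that were never declared are invented on the fly. A usage sketch, assuming a PyPy checkout, that mirrors the new tests added below:

from pypy.jit.metainterp.test.oparser import parse

loop = parse('''
i2 = int_add(i0, i1)
''', no_namespace=True, nonstrict=True)
assert loop.inputargs == []                          # no [ ... ] header line
assert loop.operations[0].getopname() == 'int_add'   # i0 and i1 created on demand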
@@ -289,12 +299,12 @@ if not line.strip(): continue # a comment or empty line newlines.append(line) - base_indent, inpargs = self.parse_inpargs(newlines[0]) - newlines = newlines[1:] + base_indent, inpargs, newlines = self.parse_inpargs(newlines) num, ops = self.parse_ops(base_indent, newlines, 0) if num < len(newlines): raise ParseError("unexpected dedent at line: %s" % newlines[num]) loop = ExtendedTreeLoop("loop") + loop.comment = first_comment loop.token = self.looptoken loop.operations = ops loop.inputargs = inpargs @@ -315,23 +325,27 @@ num += 1 return num, ops - def parse_inpargs(self, line): - base_indent = line.find('[') + def parse_inpargs(self, lines): + line = lines[0] + base_indent = len(line) - len(line.lstrip(' ')) line = line.strip() + if not line.startswith('[') and self.nonstrict: + return base_indent, [], lines + lines = lines[1:] if line == '[]': - return base_indent, [] - if base_indent == -1 or not line.endswith(']'): + return base_indent, [], lines + if not line.startswith('[') or not line.endswith(']'): raise ParseError("Wrong header: %s" % line) inpargs = self.parse_header_line(line[1:-1]) - return base_indent, inpargs + return base_indent, inpargs, lines def parse(input, cpu=None, namespace=None, type_system='lltype', boxkinds=None, invent_fail_descr=default_fail_descr, - no_namespace=False): + no_namespace=False, nonstrict=False): if namespace is None and not no_namespace: namespace = {} return OpParser(input, cpu, namespace, type_system, boxkinds, - invent_fail_descr).parse() + invent_fail_descr, nonstrict).parse() def pure_parse(*args, **kwds): kwds['invent_fail_descr'] = None Modified: pypy/trunk/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/trunk/pypy/jit/metainterp/test/test_oparser.py Tue Sep 28 17:02:00 2010 @@ -175,6 +175,10 @@ def test_parse_no_namespace(): loop = parse(example_loop_log, no_namespace=True) +def test_attach_comment_to_loop(): + loop = parse(example_loop_log, no_namespace=True) + assert loop.comment == '# bridge out of Guard12, 6 ops' + def test_parse_new_with_comma(): # this is generated by PYPYJITLOG, check that we can handle it x = ''' @@ -183,3 +187,19 @@ ''' loop = parse(x) assert loop.operations[0].getopname() == 'new' + +def test_no_fail_args(): + x = ''' + [i0] + guard_true(i0, descr=) + ''' + loop = parse(x, nonstrict=True) + assert loop.operations[0].getfailargs() == [] + +def test_no_inputargs(): + x = ''' + i2 = int_add(i0, i1) + ''' + loop = parse(x, nonstrict=True) + assert loop.inputargs == [] + assert loop.operations[0].getopname() == 'int_add' From antocuni at codespeak.net Tue Sep 28 18:11:17 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 18:11:17 +0200 (CEST) Subject: [pypy-svn] r77457 - pypy/trunk/pypy/jit/tool Message-ID: <20100928161117.DBB3B282C3F@codespeak.net> Author: antocuni Date: Tue Sep 28 18:11:16 2010 New Revision: 77457 Modified: pypy/trunk/pypy/jit/tool/loopviewer.py Log: add an option to save the differences between non-optimized loops and optimized loops Modified: pypy/trunk/pypy/jit/tool/loopviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/loopviewer.py (original) +++ pypy/trunk/pypy/jit/tool/loopviewer.py Tue Sep 28 18:11:16 2010 @@ -16,27 +16,46 @@ def main(loopfile, options): print 'Loading file:' log = logparser.parse_log_file(loopfile) - print 
- loops = logparser.extract_category(log, "jit-log-opt-") - if options.loopnum is None: - input_loops = loops - else: - input_loops = [loops[options.loopnum]] - loops = [parse(inp, no_namespace=True) for inp in input_loops] + loops, summary = consider_category(log, options, "jit-log-opt-") if not options.quiet: for loop in loops: loop.show() + if options.summary: - summary = {} - for loop in loops: - summary = loop.summary(summary) + print print 'Summary:' print_summary(summary) + if options.diff: + # non-optimized loops and summary + nloops, nsummary = consider_category(log, options, "jit-log-noopt-") + print + print 'Summary of optimized-away operations' + diff = {} + keys = set(summary.keys()).union(set(nsummary)) + for key in keys: + diff[key] = nsummary.get(key, 0) - summary.get(key, 0) + print_summary(diff) + +def consider_category(log, options, category): + loops = logparser.extract_category(log, category) + if options.loopnum is None: + input_loops = loops + else: + input_loops = [loops[options.loopnum]] + loops = [parse(inp, no_namespace=True, nonstrict=True) + for inp in input_loops] + summary = {} + for loop in loops: + summary = loop.summary(summary) + return loops, summary + + def print_summary(summary): - keys = sorted(summary) - for key in keys: - print '%4d' % summary[key], key + ops = [(summary[key], key) for key in summary] + ops.sort(reverse=True) + for n, key in ops: + print '%4d' % n, key if __name__ == '__main__': parser = optparse.OptionParser(usage="%prog loopfile [options]") @@ -46,6 +65,8 @@ help='show all loops in the file') parser.add_option('-s', '--summary', dest='summary', action='store_true', default=False, help='print a summary of the operations in the loop(s)') + parser.add_option('-d', '--diff', dest='diff', action='store_true', default=False, + help='print the difference between non-optimized and optimized operations in the loop(s)') parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False, help='do not show the graphical representation of the loop') From antocuni at codespeak.net Tue Sep 28 18:33:08 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Tue, 28 Sep 2010 18:33:08 +0200 (CEST) Subject: [pypy-svn] r77458 - pypy/trunk/pypy/jit/tool Message-ID: <20100928163308.EC172282C03@codespeak.net> Author: antocuni Date: Tue Sep 28 18:33:07 2010 New Revision: 77458 Modified: pypy/trunk/pypy/jit/tool/loopviewer.py Log: display the details of operations before and after optimizations Modified: pypy/trunk/pypy/jit/tool/loopviewer.py ============================================================================== --- pypy/trunk/pypy/jit/tool/loopviewer.py (original) +++ pypy/trunk/pypy/jit/tool/loopviewer.py Tue Sep 28 18:33:07 2010 @@ -31,11 +31,14 @@ nloops, nsummary = consider_category(log, options, "jit-log-noopt-") print print 'Summary of optimized-away operations' + print diff = {} keys = set(summary.keys()).union(set(nsummary)) for key in keys: - diff[key] = nsummary.get(key, 0) - summary.get(key, 0) - print_summary(diff) + before = nsummary.get(key, 0) + after = summary.get(key, 0) + diff[key] = (before-after, before, after) + print_diff(diff) def consider_category(log, options, category): loops = logparser.extract_category(log, category) @@ -55,7 +58,19 @@ ops = [(summary[key], key) for key in summary] ops.sort(reverse=True) for n, key in ops: - print '%4d' % n, key + print '%5d' % n, key + +def print_diff(diff): + ops = [(d, before, after, key) for key, (d, before, after) in diff.iteritems()] + 
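An illustration of what the new --diff option computes, with made-up numbers: for each kind of operation it pairs the count seen in the non-optimized trace with the count left after optimization.

nsummary = {'int_add': 10, 'getfield_gc': 8}   # from "jit-log-noopt-"
summary = {'int_add': 10, 'getfield_gc': 2}    # from "jit-log-opt-"
diff = {}
for key in set(nsummary) | set(summary):
    before = nsummary.get(key, 0)
    after = summary.get(key, 0)
    diff[key] = (before - after, before, after)
assert diff['getfield_gc'] == (6, 8, 2)   # six getfields optimized away
assert diff['int_add'] == (0, 10, 10)     # nothing gained here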
ops.sort(reverse=True) + tot_before = 0 + tot_after = 0 + for d, before, after, key in ops: + tot_before += before + tot_after += after + print '%5d - %5d = %5d ' % (before, after, d), key + print '-' * 50 + print '%5d - %5d = %5d ' % (tot_before, tot_after, tot_before-tot_after), 'TOTAL' if __name__ == '__main__': parser = optparse.OptionParser(usage="%prog loopfile [options]") From afa at codespeak.net Tue Sep 28 19:02:56 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 19:02:56 +0200 (CEST) Subject: [pypy-svn] r77459 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100928170256.74840282C03@codespeak.net> Author: afa Date: Tue Sep 28 19:02:55 2010 New Revision: 77459 Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py Log: Translation fixes Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Tue Sep 28 19:02:55 2010 @@ -49,7 +49,8 @@ length = len(data) start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - return W_BytearrayObject(data[start:stop:step]) + newdata = [data[start + i*step] for i in range(slicelength)] + return W_BytearrayObject(newdata) def getslice__Bytearray_ANY_ANY(space, w_bytearray, w_start, w_stop): length = len(w_bytearray.data) @@ -159,20 +160,19 @@ return space.wrap(buf.build()) -def getnewargs__Bytearray(space, w_bytearray): - return space.newbytearray([W_BytearrayObject(w_bytearray.wrappeditems)]) - -def bytearray_count__Bytearray_ANY(space, w_bytearray, w_obj): +def bytearray_count__Bytearray_Int(space, w_bytearray, w_char): + char = w_char.intval count = 0 - for w_item in w_bytearray.wrappeditems: - if space.eq_w(w_item, w_obj): + for c in w_bytearray.data: + if ord(c) == char: count += 1 return space.wrap(count) -def bytearray_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_obj, w_start, w_stop): +def bytearray_index__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): + char = w_char.intval start = slicetype._Eval_SliceIndex(space, w_start) stop = slicetype._Eval_SliceIndex(space, w_stop) - length = len(w_bytearray.wrappeditems) + length = len(w_bytearray.data) if start < 0: start += length if start < 0: @@ -182,8 +182,8 @@ if stop < 0: stop = 0 for i in range(start, min(stop, length)): - w_item = w_bytearray.wrappeditems[i] - if space.eq_w(w_item, w_obj): + c = w_bytearray.data[i] + if ord(c) == char: return space.wrap(i) raise OperationError(space.w_ValueError, space.wrap("bytearray.index(x): x not in bytearray")) Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py Tue Sep 28 19:02:55 2010 @@ -20,10 +20,7 @@ def descr__new__(space, w_bytearraytype, w_source='', w_encoding=None, w_errors=None): from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if w_source is None: - data = [] - else: - data = space.str_w(w_source) + data = [c for c in space.str_w(w_source)] w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) return w_obj From afa at codespeak.net 
Tue Sep 28 19:50:02 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Tue, 28 Sep 2010 19:50:02 +0200 (CEST) Subject: [pypy-svn] r77460 - pypy/branch/fast-forward/lib_pypy Message-ID: <20100928175002.129E3282C3C@codespeak.net> Author: afa Date: Tue Sep 28 19:50:00 2010 New Revision: 77460 Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py Log: Typo Modified: pypy/branch/fast-forward/lib_pypy/hashlib.py ============================================================================== --- pypy/branch/fast-forward/lib_pypy/hashlib.py (original) +++ pypy/branch/fast-forward/lib_pypy/hashlib.py Tue Sep 28 19:50:00 2010 @@ -116,4 +116,4 @@ return new for __name in algorithms: - globals()[name] = __getfunc(__name) + globals()[__name] = __getfunc(__name) From fijal at codespeak.net Wed Sep 29 10:08:07 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 29 Sep 2010 10:08:07 +0200 (CEST) Subject: [pypy-svn] r77461 - pypy/trunk/pypy/jit/backend/x86 Message-ID: <20100929080807.2093E282B9D@codespeak.net> Author: fijal Date: Wed Sep 29 10:08:05 2010 New Revision: 77461 Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py Log: A missing part of yesterday's merge Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Wed Sep 29 10:08:05 2010 @@ -224,7 +224,7 @@ assert tmpreg not in nonfloatlocs assert xmmtmp not in floatlocs # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op.args, which is a non-resizable list + # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) return nonfloatlocs, floatlocs @@ -307,7 +307,7 @@ if reg not in used: self.xrm.free_regs.append(reg) # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op.args, which is a non-resizable list + # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) self.rm._check_invariants() self.xrm._check_invariants() @@ -956,28 +956,29 @@ def consider_copystrcontent(self, op): # compute the source address - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args) - self.rm.possibly_free_var(op.args[0]) - self.rm.possibly_free_var(op.args[2]) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(args[0], args) + ofs_loc = self.rm.make_sure_var_in_reg(args[2], args) + self.rm.possibly_free_var(args[0]) + self.rm.possibly_free_var(args[2]) srcaddr_box = TempBox() srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc) # compute the destination address - base_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[3], op.args) - self.rm.possibly_free_var(op.args[1]) - self.rm.possibly_free_var(op.args[3]) + base_loc = self.rm.make_sure_var_in_reg(args[1], args) + ofs_loc = self.rm.make_sure_var_in_reg(args[3], args) + self.rm.possibly_free_var(args[1]) + self.rm.possibly_free_var(args[3]) dstaddr_box = TempBox() dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box) self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) # call memcpy() - length_loc = self.loc(op.args[4]) + length_loc = self.loc(args[4]) self.rm.before_call() self.xrm.before_call() 
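For context, the residual operation handled here, copystrcontent(src, dst, srcstart, dststart, length), copies length characters from one string into another at the given offsets; the register allocator only computes the source and destination addresses and then calls memcpy (the call follows just below). A plain-Python model of the operation's meaning (illustrative only; the buffer type is a stand-in):

def copystrcontent_model(src, dst, srcstart, dststart, length):
    # dst stands for a mutable character buffer, e.g. a list of chars
    for i in range(length):
        dst[dststart + i] = src[srcstart + i]

buf = list('........')
copystrcontent_model('hello', buf, 1, 2, 3)
assert ''.join(buf) == '..ell...'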
self.assembler._emit_call(imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) - self.rm.possibly_free_var(op.args[4]) + self.rm.possibly_free_var(args[4]) self.rm.possibly_free_var(dstaddr_box) self.rm.possibly_free_var(srcaddr_box) From arigo at codespeak.net Wed Sep 29 13:03:52 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 29 Sep 2010 13:03:52 +0200 (CEST) Subject: [pypy-svn] r77462 - pypy/trunk/pypy/rpython/lltypesystem Message-ID: <20100929110352.5A154282B9D@codespeak.net> Author: arigo Date: Wed Sep 29 13:03:50 2010 New Revision: 77462 Modified: pypy/trunk/pypy/rpython/lltypesystem/rstr.py Log: Add an _annenforceargs_ to prevent some random translation failures. Modified: pypy/trunk/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/rstr.py Wed Sep 29 13:03:50 2010 @@ -703,6 +703,7 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' + _ll_stringslice._annenforceargs_ = [None, int, int] def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) From arigo at codespeak.net Wed Sep 29 13:04:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 29 Sep 2010 13:04:24 +0200 (CEST) Subject: [pypy-svn] r77463 - in pypy/trunk/pypy/rpython/memory: gc gctransform test Message-ID: <20100929110424.5C5BB282B9D@codespeak.net> Author: arigo Date: Wed Sep 29 13:04:22 2010 New Revision: 77463 Modified: pypy/trunk/pypy/rpython/memory/gc/base.py pypy/trunk/pypy/rpython/memory/gc/minimark.py pypy/trunk/pypy/rpython/memory/gctransform/framework.py pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Log: find_clean_setarrayitems() is not a valid optimization in the presence of card marking! Disable it in this case. Modified: pypy/trunk/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/base.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/base.py Wed Sep 29 13:04:22 2010 @@ -39,6 +39,9 @@ def can_malloc_nonmovable(self): return not self.moving_gc + def can_optimize_clean_setarrayitems(self): + return True # False in case of card marking + # The following flag enables costly consistency checks after each # collection. It is automatically set to True by test_gc.py. 
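Why the can_optimize_clean_setarrayitems() hook added above is needed: with card marking, even a "clean" store (one whose value was just read from the same array) must go through the write barrier, because the barrier is also what marks the card of the destination index. A toy model of the failure, not the real GC, that only mimics the card bookkeeping:

class ToyCardArray(object):
    def __init__(self, length, card_size=4):
        self.items = [None] * length
        self.card_size = card_size
        self.cards = set()                   # indices of marked cards

    def setitem_with_barrier(self, i, value):
        self.items[i] = value
        self.cards.add(i // self.card_size)

    def setitem_no_barrier(self, i, value):  # the "clean" store
        self.items[i] = value

    def traced_at_minor_collection(self):
        found = []
        for card in self.cards:
            start = card * self.card_size
            found.extend([x for x in self.items[start:start + self.card_size]
                          if x is not None])
        return found

lst = ToyCardArray(16)
lst.setitem_with_barrier(15, 'young_obj')   # marks the card for indices 12-15
lst.setitem_no_barrier(0, lst.items[15])    # card for indices 0-3 never marked
lst.setitem_with_barrier(15, None)          # young_obj now only visible at index 0
assert 'young_obj' not in lst.traced_at_minor_collection()   # it is missed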
The # checking logic is translatable, so the flag can be set to True Modified: pypy/trunk/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimark.py Wed Sep 29 13:04:22 2010 @@ -564,6 +564,11 @@ def can_malloc_nonmovable(self): return True + def can_optimize_clean_setarrayitems(self): + if self.card_page_indices > 0: + return False + return MovingGCBase.can_optimize_clean_setarrayitems(self) + def can_move(self, obj): """Overrides the parent can_move().""" return self.is_in_nursery(obj) @@ -688,8 +693,9 @@ "unexpected GCFLAG_CARDS_SET") # if the GCFLAG_HAS_CARDS is set, check that all bits are zero now if self.header(obj).tid & GCFLAG_HAS_CARDS: - ll_assert(self.card_page_indices > 0, - "GCFLAG_HAS_CARDS but not using card marking") + if self.card_page_indices <= 0: + ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking") + return typeid = self.get_type_id(obj) ll_assert(self.has_gcptr_in_varsize(typeid), "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") @@ -961,6 +967,8 @@ if cardbyte & 1: if interval_stop > length: interval_stop = length + ll_assert(cardbyte <= 1 and bytes == 0, + "premature end of object") self.trace_and_drag_out_of_nursery_partial( obj, interval_start, interval_stop) # Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Wed Sep 29 13:04:22 2010 @@ -606,8 +606,10 @@ if self.write_barrier_ptr: self.clean_sets = ( - find_clean_setarrayitems(self.collect_analyzer, graph).union( - find_initializing_stores(self.collect_analyzer, graph))) + find_initializing_stores(self.collect_analyzer, graph)) + if self.gcdata.gc.can_optimize_clean_setarrayitems(): + self.clean_sets = self.clean_sets.union( + find_clean_setarrayitems(self.collect_analyzer, graph)) super(FrameworkGCTransformer, self).transform_graph(graph) if self.write_barrier_ptr: self.clean_sets = None Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Wed Sep 29 13:04:22 2010 @@ -1474,10 +1474,37 @@ 'page_size': 16*WORD, 'arena_size': 64*WORD, 'small_request_threshold': 5*WORD, + 'large_object': 8*WORD, + 'large_object_gcptrs': 10*WORD, 'card_page_indices': 4, } root_stack_depth = 200 + def define_no_clean_setarrayitems(cls): + # The optimization find_clean_setarrayitems() in + # gctransformer/framework.py does not work with card marking. + # Check that it is turned off. 
+ S = lltype.GcStruct('S', ('x', lltype.Signed)) + A = lltype.GcArray(lltype.Ptr(S)) + def sub(lst): + lst[15] = lltype.malloc(S) # 'lst' is set the single mark "12-15" + lst[15].x = 123 + lst[0] = lst[15] # that would be a "clean_setarrayitem" + def f(): + lst = lltype.malloc(A, 16) # 16 > 10 + rgc.collect() + sub(lst) + null = lltype.nullptr(S) + lst[15] = null # clear, so that A() is only visible via lst[0] + rgc.collect() # -> crash + return lst[0].x + return f + + def test_no_clean_setarrayitems(self): + run = self.runner("no_clean_setarrayitems") + res = run([]) + assert res == 123 + # ________________________________________________________________ # tagged pointers From arigo at codespeak.net Wed Sep 29 13:05:48 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 29 Sep 2010 13:05:48 +0200 (CEST) Subject: [pypy-svn] r77464 - in pypy/branch/minimark-free/pypy/rpython: lltypesystem memory/gc memory/gc/test Message-ID: <20100929110548.7A1BC282B9D@codespeak.net> Author: arigo Date: Wed Sep 29 13:05:46 2010 New Revision: 77464 Modified: pypy/branch/minimark-free/pypy/rpython/lltypesystem/llarena.py pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/minimark-free/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Tweak minimarkpage to free unused arenas. Modified: pypy/branch/minimark-free/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/minimark-free/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/minimark-free/pypy/rpython/lltypesystem/llarena.py Wed Sep 29 13:05:46 2010 @@ -124,6 +124,9 @@ assert self.usagemap[i] == 'x' self.usagemap[i] = '#' + def mark_freed(self): + self.freed = True # this method is a hook for tests + class fakearenaaddress(llmemory.fakeaddress): def __init__(self, arena, offset): @@ -314,7 +317,7 @@ assert arena_addr.offset == 0 arena_addr.arena.reset(False) assert not arena_addr.arena.objectptrs - arena_addr.arena.freed = True + arena_addr.arena.mark_freed() def arena_reset(arena_addr, size, zero): """Free all objects in the arena, which can then be reused. Modified: pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 29 13:05:46 2010 @@ -4,15 +4,45 @@ from pypy.rlib.debug import ll_assert WORD = LONG_BIT // 8 -WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] NULL = llmemory.NULL +WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] +assert 1 << WORD_POWER_2 == WORD -# Terminology: the memory is subdivided into "pages". +# Terminology: the memory is subdivided into "arenas" containing "pages". # A page contains a number of allocated objects, called "blocks". -# The actual allocation occurs in whole arenas, which are subdivided -# into pages. We don't keep track of the arenas. A page can be: +# The actual allocation occurs in whole arenas, which are then subdivided +# into pages. For each arena we allocate one of the following structures: + +ARENA_PTR = lltype.Ptr(lltype.ForwardReference()) +ARENA = lltype.Struct('ArenaReference', + # -- The address of the arena, as returned by malloc() + ('base', llmemory.Address), + # -- The number of free and the total number of pages in the arena + ('nfreepages', lltype.Signed), + ('totalpages', lltype.Signed), + # -- A chained list of free pages in the arena. 
Ends with NULL. + ('freepages', llmemory.Address), + # -- A linked list of arenas. See below. + ('nextarena', ARENA_PTR), + ) +ARENA_PTR.TO.become(ARENA) +ARENA_NULL = lltype.nullptr(ARENA) + +# The idea is that when we need a free page, we take it from the arena +# which currently has the *lowest* number of free pages. This allows +# arenas with a lot of free pages to eventually become entirely free, at +# which point they are returned to the OS. If an arena has a total of +# 64 pages, then we have 64 global lists, arenas_lists[0] to +# arenas_lists[63], such that arenas_lists[i] contains exactly those +# arenas that have 'nfreepages == i'. We allocate pages out of the +# arena in 'current_arena'; when it is exhausted we pick another arena +# with the smallest value for nfreepages (but > 0). + +# ____________________________________________________________ +# +# Each page in an arena can be: # # - uninitialized: never touched so far. # @@ -20,11 +50,12 @@ # PAGE_HEADER. The page is on the chained list of pages that still have # room for objects of that size, unless it is completely full. # -# - free: used to be partially full, and is now free again. The page is -# on the chained list of free pages. +# - free. The page is on the chained list of free pages 'freepages' from +# its arena. -# Similarily, each allocated page contains blocks of a given size, which can -# be either uninitialized, allocated or free. +# Each allocated page contains blocks of a given size, which can be in +# one of three states: allocated, free, or uninitialized. The uninitialized +# blocks (initially all of them) are a tail of the page. PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) PAGE_HEADER = lltype.Struct('PageHeader', @@ -32,13 +63,16 @@ # pages, it is a chained list of pages having the same size class, # rooted in 'page_for_size[size_class]'. For full pages, it is a # different chained list rooted in 'full_page_for_size[size_class]'. + # For free pages, it is the list 'freepages' in the arena header. ('nextpage', PAGE_PTR), - # -- The number of free blocks, and the number of uninitialized blocks. - # The number of allocated blocks is the rest. - ('nuninitialized', lltype.Signed), + # -- The arena this page is part of. + ('arena', ARENA_PTR), + # -- The number of free blocks. The numbers of uninitialized and + # allocated blocks can be deduced from the context if needed. ('nfree', lltype.Signed), - # -- The chained list of free blocks. If there are none, points to the - # first uninitialized block. + # -- The chained list of free blocks. It ends as a pointer to the + # first uninitialized block (pointing to data that is uninitialized, + # or to the end of the page). 
('freeblock', llmemory.Address), # -- The structure above is 4 words, which is a good value: # '(1024-4) % N' is zero or very small for various small N's, @@ -72,13 +106,35 @@ self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), length, flavor='raw') self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + assert page_size > self.hdrsize self.nblocks_for_size[0] = 0 # unused for i in range(1, length): self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i) # - self.uninitialized_pages = NULL + self.max_pages_per_arena = arena_size // page_size + self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR), + self.max_pages_per_arena, + flavor='raw', zero=True) + # this is used in mass_free() only + self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR), + self.max_pages_per_arena, + flavor='raw', zero=True) + # + # the arena currently consumed; it must have at least one page + # available, or be NULL. The arena object that we point to is + # not in any 'arenas_lists'. We will consume all its pages before + # we choose a next arena, even if there is a major collection + # in-between. + self.current_arena = ARENA_NULL + # + # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty + self.min_empty_nfreepages = self.max_pages_per_arena + # + # part of current_arena might still contain uninitialized pages self.num_uninitialized_pages = 0 - self.free_pages = NULL + # + # the total memory used, counting every block in use, without + # the additional bookkeeping stuff. self.total_memory_used = r_uint(0) @@ -109,16 +165,12 @@ # else: # The 'result' is part of the uninitialized blocks. - ll_assert(page.nuninitialized > 0, - "fully allocated page found in the page_for_size list") - page.nuninitialized -= 1 - if page.nuninitialized > 0: - freeblock = result + nsize - else: - freeblock = NULL + freeblock = result + nsize # page.freeblock = freeblock - if freeblock == NULL: + # + pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) + if freeblock - pageaddr > self.page_size - nsize: # This was the last free block, so unlink the page from the # chained list and put it in the 'full_page_for_size' list. self.page_for_size[size_class] = page.nextpage @@ -132,37 +184,83 @@ def allocate_new_page(self, size_class): """Allocate and return a new page for the given size_class.""" # - if self.free_pages != NULL: + # Allocate a new arena if needed. + if self.current_arena == ARENA_NULL: + self.allocate_new_arena() + # + # The result is simply 'current_arena.freepages'. + arena = self.current_arena + result = arena.freepages + if arena.nfreepages > 0: + # + # The 'result' was part of the chained list; read the next. + arena.nfreepages -= 1 + freepages = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + 0) # - # Get the page from the chained list 'free_pages'. - page = self.free_pages - self.free_pages = page.address[0] - llarena.arena_reset(page, llmemory.sizeof(llmemory.Address), 0) else: - # Get the next free page from the uninitialized pages. - if self.num_uninitialized_pages == 0: - self.allocate_new_arena() # Out of memory. Get a new arena. - page = self.uninitialized_pages - self.uninitialized_pages += self.page_size + # The 'result' is part of the uninitialized pages. 
+ ll_assert(self.num_uninitialized_pages > 0, + "fully allocated arena found in self.current_arena") self.num_uninitialized_pages -= 1 + if self.num_uninitialized_pages > 0: + freepages = result + self.page_size + else: + freepages = NULL # - # Initialize the fields of the resulting page - llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) - result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) + arena.freepages = freepages + if freepages == NULL: + # This was the last page, so put the arena away into + # arenas_lists[0]. + arena.nextarena = self.arenas_lists[0] + self.arenas_lists[0] = arena + self.current_arena = ARENA_NULL # - result.nuninitialized = self.nblocks_for_size[size_class] - result.nfree = 0 - result.freeblock = page + self.hdrsize - result.nextpage = PAGE_NULL + # Initialize the fields of the resulting page + llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER)) + page = llmemory.cast_adr_to_ptr(result, PAGE_PTR) + page.arena = arena + page.nfree = 0 + page.freeblock = result + self.hdrsize + page.nextpage = PAGE_NULL ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") - self.page_for_size[size_class] = result - return result + self.page_for_size[size_class] = page + return page + + + def _all_arenas(self): + """For testing. Enumerates all arenas.""" + if self.current_arena: + yield self.current_arena + for arena in self.arenas_lists: + while arena: + yield arena + arena = arena.nextarena def allocate_new_arena(self): - ll_assert(self.num_uninitialized_pages == 0, - "some uninitialized pages are already waiting") + """Return in self.current_arena the arena to allocate from next.""" + # + # Pick an arena from 'arenas_lists[i]', with i as small as possible + # but > 0. Use caching with 'min_empty_nfreepages', which guarantees + # that 'arenas_lists[1:min_empty_nfreepages]' are all empty. + i = self.min_empty_nfreepages + while i < self.max_pages_per_arena: + # + if self.arenas_lists[i] != ARENA_NULL: + # + # Found it. + self.current_arena = self.arenas_lists[i] + self.arenas_lists[i] = self.current_arena.nextarena + return + # + i += 1 + self.min_empty_nfreepages = i + # + # No more arena with any free page. We must allocate a new arena. # # 'arena_base' points to the start of malloced memory; it might not # be a page-aligned address @@ -177,13 +275,15 @@ # 'npages' is the number of full pages just allocated npages = (arena_end - firstpage) // self.page_size # - # add these pages to the list - self.uninitialized_pages = firstpage + # Allocate an ARENA object and initialize it + arena = lltype.malloc(ARENA, flavor='raw') + arena.base = arena_base + arena.nfreepages = 0 # they are all uninitialized pages + arena.totalpages = npages + arena.freepages = firstpage self.num_uninitialized_pages = npages + self.current_arena = arena # - # increase a bit arena_size for the next time - self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1) - self.arena_size = (self.arena_size // self.page_size) * self.page_size allocate_new_arena._dont_inline_ = True @@ -199,16 +299,51 @@ # # Walk the pages in 'page_for_size[size_class]' and # 'full_page_for_size[size_class]' and free some objects. - # Pages completely freed are added to 'self.free_pages', and - # become available for reuse by any size class. Pages not - # completely freed are re-chained either in + # Pages completely freed are added to 'page.arena.freepages', + # and become available for reuse by any size class. 
Pages + # not completely freed are re-chained either in # 'full_page_for_size[]' or 'page_for_size[]'. - self.mass_free_in_page(size_class, ok_to_free_func) + self.mass_free_in_pages(size_class, ok_to_free_func) # size_class -= 1 + # + # Rehash arenas into the correct arenas_lists[i]. If + # 'self.current_arena' contains an arena too, it remains there. + (self.old_arenas_lists, self.arenas_lists) = ( + self.arenas_lists, self.old_arenas_lists) + # + i = 0 + while i < self.max_pages_per_arena: + self.arenas_lists[i] = ARENA_NULL + i += 1 + # + i = 0 + while i < self.max_pages_per_arena: + arena = self.old_arenas_lists[i] + while arena != ARENA_NULL: + nextarena = arena.nextarena + # + if arena.nfreepages == arena.totalpages: + # + # The whole arena is empty. Free it. + llarena.arena_free(arena.base) + lltype.free(arena, flavor='raw') + # + else: + # Insert 'arena' in the correct arenas_lists[n] + n = arena.nfreepages + ll_assert(n < self.max_pages_per_arena, + "totalpages != nfreepages >= max_pages_per_arena") + arena.nextarena = self.arenas_lists[n] + self.arenas_lists[n] = arena + # + arena = nextarena + i += 1 + # + self.min_empty_nfreepages = 1 - def mass_free_in_page(self, size_class, ok_to_free_func): + def mass_free_in_pages(self, size_class, ok_to_free_func): nblocks = self.nblocks_for_size[size_class] block_size = size_class * WORD remaining_partial_pages = PAGE_NULL @@ -224,8 +359,7 @@ while page != PAGE_NULL: # # Collect the page. - surviving = self.walk_page(page, block_size, - nblocks, ok_to_free_func) + surviving = self.walk_page(page, block_size, ok_to_free_func) nextpage = page.nextpage # if surviving == nblocks: @@ -259,19 +393,23 @@ def free_page(self, page): """Free a whole page.""" # - # Done by inserting it in the 'free_pages' list. + # Insert the freed page in the arena's 'freepages' list. + # If nfreepages == totalpages, then it will be freed at the + # end of mass_free(). + arena = page.arena + arena.nfreepages += 1 pageaddr = llmemory.cast_ptr_to_adr(page) pageaddr = llarena.getfakearenaaddress(pageaddr) llarena.arena_reset(pageaddr, self.page_size, 0) llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) - pageaddr.address[0] = self.free_pages - self.free_pages = pageaddr + pageaddr.address[0] = arena.freepages + arena.freepages = pageaddr - def walk_page(self, page, block_size, nblocks, ok_to_free_func): + def walk_page(self, page, block_size, ok_to_free_func): """Walk over all objects in a page, and ask ok_to_free_func().""" # - # 'freeblock' is the next free block, or NULL if there isn't any more. + # 'freeblock' is the next free block freeblock = page.freeblock # # 'prevfreeblockat' is the address of where 'freeblock' was read from. @@ -281,22 +419,28 @@ obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) obj += self.hdrsize surviving = 0 # initially + skip_free_blocks = page.nfree # - nblocks -= page.nuninitialized - index = nblocks - while index > 0: + while True: # if obj == freeblock: # + if skip_free_blocks == 0: + # + # 'obj' points to the first uninitialized block, + # or to the end of the page if there are none. + break + # # 'obj' points to a free block. It means that # 'prevfreeblockat.address[0]' does not need to be updated. # Just read the next free block from 'obj.address[0]'. + skip_free_blocks -= 1 prevfreeblockat = obj freeblock = obj.address[0] # else: # 'obj' points to a valid object. 
- ll_assert(not freeblock or freeblock > obj, + ll_assert(freeblock > obj, "freeblocks are linked out of order") # if ok_to_free_func(obj): @@ -310,15 +454,14 @@ prevfreeblockat = obj obj.address[0] = freeblock # + # Update the number of free objects in the page. + page.nfree += 1 + # else: # The object survives. surviving += 1 # obj += block_size - index -= 1 - # - # Update the number of free objects in the page. - page.nfree = nblocks - surviving # # Update the global total size of objects. self.total_memory_used += surviving * block_size @@ -327,6 +470,20 @@ return surviving + def _nuninitialized(self, page, size_class): + # Helper for debugging: count the number of uninitialized blocks + freeblock = page.freeblock + for i in range(page.nfree): + freeblock = freeblock.address[0] + assert freeblock != NULL + pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) + num_initialized_blocks, rem = divmod( + freeblock - pageaddr - self.hdrsize, size_class * WORD) + assert rem == 0, "page size_class misspecified?" + nblocks = self.nblocks_for_size[size_class] + return nblocks - num_initialized_blocks + + # ____________________________________________________________ # Helpers to go from a pointer to the start of its page Modified: pypy/branch/minimark-free/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/minimark-free/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/minimark-free/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 29 13:05:46 2010 @@ -12,17 +12,19 @@ def test_allocate_arena(): - ac = ArenaCollection(SHIFT + 16*20, 16, 1) + ac = ArenaCollection(SHIFT + 64*20, 64, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 16*20 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1") + upages = ac.current_arena.freepages + upages + 64*20 # does not raise + py.test.raises(llarena.ArenaError, "upages + 64*20 + 1") # - ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1) + ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 16*20 + 7 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16") + upages = ac.current_arena.freepages + upages + 64*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, "upages + 64*20 + 64") def test_allocate_new_page(): @@ -31,7 +33,8 @@ # def checknewpage(page, size_class): size = WORD * size_class - assert page.nuninitialized == (pagesize - hdrsize) // size + assert (ac._nuninitialized(page, size_class) == + (pagesize - hdrsize) // size) assert page.nfree == 0 page1 = page.freeblock - hdrsize assert llmemory.cast_ptr_to_adr(page) == page1 @@ -44,13 +47,13 @@ page = ac.allocate_new_page(5) checknewpage(page, 5) assert ac.num_uninitialized_pages == 2 - assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) + assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[5] == page # page = ac.allocate_new_page(3) checknewpage(page, 3) assert ac.num_uninitialized_pages == 1 - assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) + assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[3] == page # page = ac.allocate_new_page(4) @@ -71,17 +74,17 @@ page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) if step == 1: page.nfree = 0 - 
page.nuninitialized = nblocks - nusedblocks + nuninitialized = nblocks - nusedblocks else: page.nfree = nusedblocks - page.nuninitialized = nblocks - 2*nusedblocks + nuninitialized = nblocks - 2*nusedblocks + page.freeblock = pageaddr + hdrsize + nusedblocks * size_block if nusedblocks < nblocks: - page.freeblock = pageaddr + hdrsize + nusedblocks * size_block chainedlists = ac.page_for_size else: - page.freeblock = NULL chainedlists = ac.full_page_for_size page.nextpage = chainedlists[size_class] + page.arena = ac.current_arena chainedlists[size_class] = page if fill_with_objects: for i in range(0, nusedblocks*step, step): @@ -98,11 +101,15 @@ prev = 'prevhole.address[0]' endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block exec '%s = endaddr' % prev in globals(), locals() + assert ac._nuninitialized(page, size_class) == nuninitialized # ac.allocate_new_arena() num_initialized_pages = len(pagelayout.rstrip(" ")) - ac._startpageaddr = ac.uninitialized_pages - ac.uninitialized_pages += pagesize * num_initialized_pages + ac._startpageaddr = ac.current_arena.freepages + if pagelayout.endswith(" "): + ac.current_arena.freepages += pagesize * num_initialized_pages + else: + ac.current_arena.freepages = NULL ac.num_uninitialized_pages -= num_initialized_pages # for i in reversed(range(num_initialized_pages)): @@ -115,8 +122,9 @@ link(pageaddr, size_class, size_block, nblocks, nblocks-1) elif c == '.': # a free, but initialized, page llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) - pageaddr.address[0] = ac.free_pages - ac.free_pages = pageaddr + pageaddr.address[0] = ac.current_arena.freepages + ac.current_arena.freepages = pageaddr + ac.current_arena.nfreepages += 1 elif c == '#': # a random full page, in the list 'full_pages' size_class = fill_with_objects or 1 size_block = WORD * size_class @@ -142,26 +150,29 @@ def checkpage(ac, page, expected_position): assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position) +def freepages(ac): + return ac.current_arena.freepages + def test_simple_arena_collection(): pagesize = hdrsize + 16 ac = arena_collection_for_test(pagesize, "##....# ") # - assert ac.free_pages == pagenum(ac, 2) + assert freepages(ac) == pagenum(ac, 2) page = ac.allocate_new_page(1); checkpage(ac, page, 2) - assert ac.free_pages == pagenum(ac, 3) + assert freepages(ac) == pagenum(ac, 3) page = ac.allocate_new_page(2); checkpage(ac, page, 3) - assert ac.free_pages == pagenum(ac, 4) + assert freepages(ac) == pagenum(ac, 4) page = ac.allocate_new_page(3); checkpage(ac, page, 4) - assert ac.free_pages == pagenum(ac, 5) + assert freepages(ac) == pagenum(ac, 5) page = ac.allocate_new_page(4); checkpage(ac, page, 5) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 3 + assert freepages(ac) == pagenum(ac, 7) and ac.num_uninitialized_pages == 3 page = ac.allocate_new_page(5); checkpage(ac, page, 7) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 2 + assert freepages(ac) == pagenum(ac, 8) and ac.num_uninitialized_pages == 2 page = ac.allocate_new_page(6); checkpage(ac, page, 8) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 1 + assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1 page = ac.allocate_new_page(7); checkpage(ac, page, 9) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 0 + assert not ac.current_arena and ac.num_uninitialized_pages == 0 def chkob(ac, num_page, pos_obj, obj): @@ -205,47 +216,47 @@ ac = arena_collection_for_test(pagesize, "/.", 
fill_with_objects=2) page = getpage(ac, 0) assert page.nfree == 3 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 2*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 2*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 0, 6*WORD, obj) assert page.nfree == 1 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 10*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj) assert page.nfree == 0 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 12*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj) - assert page.nuninitialized == 2 + assert ac._nuninitialized(page, 2) == 2 obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj) assert page.nfree == 0 - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 obj = ac.malloc(2*WORD); chkob(ac, 1, 0*WORD, obj) def test_malloc_new_arena(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "### ") + arena_size = ac.arena_size obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 2 # del ac.allocate_new_arena # restore the one from the class - arena_size = ac.arena_size obj = ac.malloc(3*WORD) # need a new arena assert ac.num_uninitialized_pages == (arena_size // ac.page_size - - 1 # for start_of_page() - 1 # the just-allocated page ) class OkToFree(object): - def __init__(self, ac, answer): + def __init__(self, ac, answer, multiarenas=False): assert callable(answer) or 0.0 <= answer <= 1.0 self.ac = ac self.answer = answer + self.multiarenas = multiarenas self.lastnum = 0.0 self.seen = {} @@ -257,7 +268,10 @@ ok_to_free = self.lastnum >= 1.0 if ok_to_free: self.lastnum -= 1.0 - key = addr - self.ac._startpageaddr + if self.multiarenas: + key = (addr.arena, addr.offset) + else: + key = addr - self.ac._startpageaddr assert key not in self.seen self.seen[key] = ok_to_free return ok_to_free @@ -272,10 +286,10 @@ page = getpage(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 1 + assert ac._nuninitialized(page, 2) == 1 assert page.nfree == 0 chkob(ac, 0, 4*WORD, page.freeblock) - assert ac.free_pages == NULL + assert freepages(ac) == NULL def test_mass_free_emptied_page(): pagesize = hdrsize + 7*WORD @@ -285,7 +299,7 @@ assert ok_to_free.seen == {hdrsize + 0*WORD: True, hdrsize + 2*WORD: True} pageaddr = pagenum(ac, 0) - assert pageaddr == ac.free_pages + assert pageaddr == freepages(ac) assert pageaddr.address[0] == NULL assert ac.page_for_size[2] == PAGE_NULL @@ -300,10 +314,9 @@ page = getpage(ac, 0) assert page == ac.full_page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 assert page.nfree == 0 - assert page.freeblock == NULL - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.page_for_size[2] == PAGE_NULL def test_mass_free_full_is_partially_emptied(): @@ -319,19 +332,19 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 assert page.nfree == 2 assert page.freeblock == pageaddr + hdrsize + 2*WORD assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD - assert page.freeblock.address[0].address[0] == NULL - assert ac.free_pages == NULL + assert page.freeblock.address[0].address[0] == pageaddr + hdrsize + 
8*WORD + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL def test_mass_free_half_page_remains(): pagesize = hdrsize + 24*WORD ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) page = getpage(ac, 0) - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 # ok_to_free = OkToFree(ac, False) @@ -344,7 +357,7 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 assert page.freeblock == pageaddr + hdrsize + 2*WORD assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD @@ -352,14 +365,14 @@ pageaddr + hdrsize + 10*WORD assert page.freeblock.address[0].address[0].address[0] == \ pageaddr + hdrsize + 14*WORD - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL def test_mass_free_half_page_becomes_more_free(): pagesize = hdrsize + 24*WORD ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) page = getpage(ac, 0) - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 # ok_to_free = OkToFree(ac, 0.5) @@ -372,7 +385,7 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 6 fb = page.freeblock assert fb == pageaddr + hdrsize + 2*WORD @@ -384,7 +397,7 @@ pageaddr + hdrsize + 12*WORD assert fb.address[0].address[0].address[0].address[0].address[0] == \ pageaddr + hdrsize + 14*WORD - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL # ____________________________________________________________ @@ -392,17 +405,29 @@ def test_random(): import random pagesize = hdrsize + 24*WORD - num_pages = 28 + num_pages = 3 ac = arena_collection_for_test(pagesize, " " * num_pages) live_objects = {} # - # Run the test until ac.allocate_new_arena() is called. + # Run the test until three arenas are freed. This is a quick test + # that the arenas are really freed by the logic. class DoneTesting(Exception): - pass - def done_testing(): - raise DoneTesting - ac.allocate_new_arena = done_testing - # + counter = 0 + def my_allocate_new_arena(): + # the following output looks cool on a 112-character-wide terminal. 
+ lst = sorted(ac._all_arenas(), key=lambda a: a.base.arena._arena_index) + for a in lst: + print a.base.arena, a.base.arena.usagemap + print '-' * 80 + ac.__class__.allocate_new_arena(ac) + a = ac.current_arena.base.arena + def my_mark_freed(): + a.freed = True + DoneTesting.counter += 1 + if DoneTesting.counter > 3: + raise DoneTesting + a.mark_freed = my_mark_freed + ac.allocate_new_arena = my_allocate_new_arena try: while True: # @@ -410,12 +435,13 @@ for i in range(random.randrange(50, 100)): size_class = random.randrange(1, 7) obj = ac.malloc(size_class * WORD) - at = obj - ac._startpageaddr + at = (obj.arena, obj.offset) assert at not in live_objects live_objects[at] = size_class * WORD # # Free half the objects, randomly - ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5) + ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5, + multiarenas=True) ac.mass_free(ok_to_free) # # Check that we have seen all objects @@ -428,5 +454,4 @@ surviving_total_size += live_objects[at] assert ac.total_memory_used == surviving_total_size except DoneTesting: - # the following output looks cool on a 112-character-wide terminal. - print ac._startpageaddr.arena.usagemap + pass From afa at codespeak.net Wed Sep 29 13:06:09 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 29 Sep 2010 13:06:09 +0200 (CEST) Subject: [pypy-svn] r77465 - in pypy/branch/fast-forward/pypy/objspace/std: . test Message-ID: <20100929110609.DD288282B9D@codespeak.net> Author: afa Date: Wed Sep 29 13:06:08 2010 New Revision: 77465 Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Log: Implement comparison between bytearray and str Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Wed Sep 29 13:06:08 2010 @@ -6,6 +6,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib.rstring import StringBuilder from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype from pypy.interpreter import gateway @@ -100,6 +101,16 @@ return space.w_False return space.w_True +def eq__Bytearray_String(space, w_bytearray1, w_string2): + data1 = w_bytearray1.data + data2 = w_string2._value + if len(data1) != len(data2): + return space.w_False + for i in range(len(data1)): + if data1[i] != data2[i]: + return space.w_False + return space.w_True + def _min(a, b): if a < b: return a Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Wed Sep 29 13:06:08 2010 @@ -41,3 +41,10 @@ assert bytearray('hello') < bytearray('world') assert bytearray('world') > bytearray('hello') + def test_compare_str(self): + assert bytearray('hello') == 'hello' + assert 'hello' == bytearray('hello') + # unicode is always different + assert bytearray('hello') != u'hello' + assert u'hello' != bytearray('hello') + From antocuni at codespeak.net Wed Sep 29 13:45:32 2010 From: antocuni at codespeak.net (antocuni at 
codespeak.net) Date: Wed, 29 Sep 2010 13:45:32 +0200 (CEST) Subject: [pypy-svn] r77466 - in pypy/branch/jitffi/pypy: jit/codewriter jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test rlib Message-ID: <20100929114532.CCF70282B9D@codespeak.net> Author: antocuni Date: Wed Sep 29 13:45:31 2010 New Revision: 77466 Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jitffi/pypy/rlib/clibffi.py pypy/branch/jitffi/pypy/rlib/libffi.py Log: - remove rop.CALL_C: instead, we will use a plain rop.CALL. - call a special stub prepare_call, to teach in advance the optimizer about the signature of the function we are calling - move test(s) to its own class Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jitffi/pypy/jit/codewriter/support.py Wed Sep 29 13:45:31 2010 @@ -222,6 +222,9 @@ # libffi support # -------------- +def _ll_4_libffi_prepare_call(llfunc, symfunc, args, result): + pass + def _ll_2_libffi_push_arg(llfunc, value): from pypy.rlib.libffi import FuncPtr func = cast_base_ptr_to_instance(FuncPtr, llfunc) @@ -234,6 +237,7 @@ # XXX: should be RES_TP, but it doesn't work + # in the following calls to builtins, the JIT is allowed to look inside: inline_calls_to = [ ('int_floordiv_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Wed Sep 29 13:45:31 2010 @@ -50,7 +50,8 @@ for push_op in self.func_args[funcval]: argval = self.getvalue(push_op.getarg(2)) arglist.append(argval.force_box()) - newop = ResOperation(rop.CALL_C, arglist, op.result, None) + # XXX: add the descr + newop = ResOperation(rop.CALL, arglist, op.result, None) del self.func_args[funcval] return newop Modified: pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py Wed Sep 29 13:45:31 2010 @@ -459,7 +459,6 @@ 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend - 'CALL_C/*', # call directly C code from here (a function addres comes first) '_CANRAISE_FIRST', # ----- start of can_raise operations ----- 'CALL/*d', Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Wed Sep 29 13:45:31 2010 @@ -1,6 +1,6 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, hint from pypy.jit.metainterp.test.test_basic import LLJitMixin from pypy.rlib.clibffi import FuncPtr, CDLL, ffi_type_sint from pypy.rlib.libffi import 
IntArg, Func @@ -35,6 +35,7 @@ while n < 10: driver.jit_merge_point(n=n, func=func) driver.can_enter_jit(n=n, func=func) + func = hint(func, promote=True) arg0 = IntArg(n) arg1 = IntArg(1) arg0.next = arg1 Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 29 13:45:31 2010 @@ -262,6 +262,10 @@ expected = self.parse(optops) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) + return loop + + +class OptimizeOptTest(BaseTestOptimizeOpt): def test_simple(self): ops = """ @@ -2643,7 +2647,7 @@ ''', rop.GUARD_TRUE) -class TestLLtype(BaseTestOptimizeOpt, LLtypeMixin): +class TestLLtype(OptimizeOptTest, LLtypeMixin): def test_residual_call_does_not_invalidate_caches(self): ops = """ @@ -3893,8 +3897,10 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + +class TestJitFfi(OptimizeOptTest, LLtypeMixin): + def test_ffi_call(self): - # XXX: do we want to promote p0 and get rid of the getfield? ops = """ [p0, i1, f2] call("_libffi_prepare_call", p0, descr=plaincalldescr) @@ -3907,14 +3913,14 @@ expected = """ [p0, i1, f2] p3 = getfield_gc_pure(p0) - i4 = call_c(p3, i1, f2) + i4 = call(p3, i1, f2) jump(p0, i4, f2) """ - self.optimize_loop(ops, 'Not, Not, Not', expected) + loop = self.optimize_loop(ops, 'Not, Not, Not', expected) -##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): +##class TestOOtype(OptimizeOptTest, OOtypeMixin): ## def test_instanceof(self): ## ops = """ Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/clibffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Wed Sep 29 13:45:31 2010 @@ -471,7 +471,7 @@ ll_args = lltype.nullptr(rffi.VOIDPP.TO) ll_result = lltype.nullptr(rffi.VOIDP.TO) - _immutable_fields_ = ['funcsym'] # XXX probably more + _immutable_fields_ = ['funcsym', 'argtypes'] # XXX probably more def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, keepalive=None): Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Wed Sep 29 13:45:31 2010 @@ -1,8 +1,8 @@ - from pypy.rlib.clibffi import * from pypy.rlib.objectmodel import specialize from pypy.rlib import jit + class AbstractArg(object): next = None @@ -28,15 +28,21 @@ class Func(object): + _immutable_ = True + def __init__(self, funcptr): - # XXX: for now, this is just a wrapper around libffi.FuncPtr, but in + # XXX: for now, this is just a wrapper around clibffi.FuncPtr, but in # the future it will replace it completely self.funcptr = funcptr + def _prepare(self, funcsym, argtypes, restype): + pass + _prepare.oopspec = 'libffi_prepare_call(self, funcsym, argtypes, restype)' + @jit.unroll_safe @specialize.arg(2) def call(self, argchain, RESULT): - # implementation detail + self._prepare(self.funcptr.funcsym, self.funcptr.argtypes, self.funcptr.restype) arg = argchain while arg: arg.push(self.funcptr) From antocuni at codespeak.net Wed Sep 29 13:53:13 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 29 Sep 2010 13:53:13 +0200 (CEST) Subject: [pypy-svn] r77467 - in 
pypy/branch/jitffi/pypy/jit/metainterp: optimizeopt test Message-ID: <20100929115313.81103282B9D@codespeak.net> Author: antocuni Date: Wed Sep 29 13:53:11 2010 New Revision: 77467 Added: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py - copied, changed from r77466, pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Removed: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Log: rename ccall.py to fficall.py Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py Wed Sep 29 13:53:11 2010 @@ -3,7 +3,7 @@ from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap -from ccall import OptCCall +from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -15,7 +15,7 @@ OptRewrite(), OptVirtualize(), OptHeap(), - OptCCall(), + OptFfiCall(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) optimizer.propagate_all_forward() Copied: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py (from r77466, pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py) ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ccall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py Wed Sep 29 13:53:11 2010 @@ -2,7 +2,7 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -class OptCCall(Optimization): +class OptFfiCall(Optimization): def __init__(self): self.func_args = {} @@ -64,4 +64,4 @@ else: self.emit_operation(op) -optimize_ops = _findall(OptCCall, 'optimize_') +optimize_ops = _findall(OptFfiCall, 'optimize_') Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 29 13:53:11 2010 @@ -3898,7 +3898,7 @@ self.optimize_loop(ops, 'Not, Not', expected) -class TestJitFfi(OptimizeOptTest, LLtypeMixin): +class TestFfiCall(OptimizeOptTest, LLtypeMixin): def test_ffi_call(self): ops = """ From antocuni at codespeak.net Wed Sep 29 16:47:52 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Wed, 29 Sep 2010 16:47:52 +0200 (CEST) Subject: [pypy-svn] r77468 - in pypy/branch/jitffi/pypy: jit/codewriter jit/metainterp/optimizeopt jit/metainterp/test rlib Message-ID: <20100929144752.0DAF1282B9D@codespeak.net> Author: antocuni Date: Wed Sep 29 16:47:50 2010 New Revision: 77468 Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/jitffi/pypy/rlib/clibffi.py pypy/branch/jitffi/pypy/rlib/libffi.py Log: refactor a bit everything. 
The idea is that all the relevant oopspec should be attached only to methofs of the new libffi.Func, because then it's easier for the optimizer to keep track of which function we are referring to. Modified: pypy/branch/jitffi/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/codewriter/support.py (original) +++ pypy/branch/jitffi/pypy/jit/codewriter/support.py Wed Sep 29 16:47:50 2010 @@ -222,19 +222,19 @@ # libffi support # -------------- -def _ll_4_libffi_prepare_call(llfunc, symfunc, args, result): +def _ll_1_libffi_prepare_call(func): pass def _ll_2_libffi_push_arg(llfunc, value): - from pypy.rlib.libffi import FuncPtr - func = cast_base_ptr_to_instance(FuncPtr, llfunc) - return func.push_arg(value) - -def _ll_3_libffi_call(llfunc, symfunc, RES_TP): - from pypy.rlib.libffi import FuncPtr - func = cast_base_ptr_to_instance(FuncPtr, llfunc) - return func.call(symfunc, lltype.Signed) -# XXX: should be RES_TP, but it doesn't work + from pypy.rlib.libffi import Func + func = cast_base_ptr_to_instance(Func, llfunc) + return func._push_arg(value) + +def _ll_3_libffi_call(llfunc, funcsym, RESULT): + from pypy.rlib.libffi import Func + func = cast_base_ptr_to_instance(Func, llfunc) + return func._do_call(funcsym, lltype.Signed) +# XXX: should be RESULT, but it doesn't work Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py Wed Sep 29 16:47:50 2010 @@ -1,11 +1,21 @@ +from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.libffi import Func from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +class FuncDescription(object): + + def __init__(self, cpu, func): + self.func = func + self.args = [] + + class OptFfiCall(Optimization): def __init__(self): - self.func_args = {} + self.funcs = {} def get_oopspec(self, funcval): # XXX: not RPython at all, just a hack while waiting to have an @@ -20,8 +30,8 @@ return None def optimize_CALL(self, op): - funcval = self.getvalue(op.getarg(0)) - oopspec = self.get_oopspec(funcval) + targetval = self.getvalue(op.getarg(0)) + oopspec = self.get_oopspec(targetval) if oopspec == 'prepare_call': self.do_prepare_call(op) return @@ -32,27 +42,43 @@ op = self.do_call(op) self.emit_operation(op) - def do_prepare_call(self, op): + def _cast_to_high_level(self, Class, obj): + if we_are_translated(): + XXX + else: + # this is just for the tests in test_optimizeopt.py + cls = getattr(obj, '_fake_class', obj.__class__) + assert issubclass(cls, Class) + return obj + + def _get_func(self, op): funcval = self.getvalue(op.getarg(1)) - assert funcval not in self.func_args - self.func_args[funcval] = [] + assert funcval.is_constant() # XXX: do something nice if it's not constant + llfunc = funcval.box.getref_base() + func = self._cast_to_high_level(Func, llfunc) + return func + + def do_prepare_call(self, op): + func = self._get_func(op) + assert func not in self.funcs # XXX: do something nice etc. etc. 
+ self.funcs[func] = FuncDescription(self.optimizer.cpu, func) def do_push_arg(self, op): - # we store the op in func_args because we might want to emit it later, + # we store the op in funcs because we might want to emit it later, # in case we give up with the optimization - funcval = self.getvalue(op.getarg(1)) - self.func_args[funcval].append(op) + func = self._get_func(op) + self.funcs[func].args.append(op) def do_call(self, op): - funcval = self.getvalue(op.getarg(1)) + func = self._get_func(op) funcsymval = self.getvalue(op.getarg(2)) arglist = [funcsymval.force_box()] - for push_op in self.func_args[funcval]: + for push_op in self.funcs[func].args: argval = self.getvalue(push_op.getarg(2)) arglist.append(argval.force_box()) # XXX: add the descr newop = ResOperation(rop.CALL, arglist, op.result, None) - del self.func_args[funcval] + del self.funcs[func] return newop def propagate_forward(self, op): Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 29 16:47:50 2010 @@ -3898,25 +3898,45 @@ self.optimize_loop(ops, 'Not, Not', expected) +# ------------------------------------------------ +from pypy.rpython.lltypesystem import llmemory +from pypy.rlib.libffi import Func + +class FakeLLObject(object): + + def __init__(self, **kwds): + self.__dict__.update(kwds) + self._TYPE = llmemory.GCREF + + def _identityhash(self): + return id(self) + class TestFfiCall(OptimizeOptTest, LLtypeMixin): + class namespace: + cpu = LLtypeMixin.cpu + plaincalldescr = LLtypeMixin.plaincalldescr + funcptr = FakeLLObject() + func = FakeLLObject(_fake_class=Func) + + namespace = namespace.__dict__ + + def test_ffi_call(self): ops = """ - [p0, i1, f2] - call("_libffi_prepare_call", p0, descr=plaincalldescr) - call("_libffi_push_arg_Signed", p0, i1, descr=plaincalldescr) - call("_libffi_push_arg_Float", p0, f2, descr=plaincalldescr) - p3 = getfield_gc_pure(p0) # funcsym - i4 = call("_libffi_call", p0, p3, descr=plaincalldescr) - jump(p0, i4, f2) + [i0, f1] + call("_libffi_prepare_call", ConstPtr(func), descr=plaincalldescr) + call("_libffi_push_arg_Signed", ConstPtr(func), i0, descr=plaincalldescr) + call("_libffi_push_arg_Float", ConstPtr(func), f1, descr=plaincalldescr) + i3 = call("_libffi_call", ConstPtr(func), 1, descr=plaincalldescr) + jump(i3, f1) """ expected = """ - [p0, i1, f2] - p3 = getfield_gc_pure(p0) - i4 = call(p3, i1, f2) - jump(p0, i4, f2) + [i0, f1] + i3 = call(1, i0, f1) + jump(i3, f1) """ - loop = self.optimize_loop(ops, 'Not, Not, Not', expected) + loop = self.optimize_loop(ops, 'Not, Not', expected) Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/clibffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Wed Sep 29 16:47:50 2010 @@ -515,7 +515,7 @@ # doesn't work correctly with mixing non-negative and normal integers push_arg._annenforceargs_ = [None, int] #push_arg._annspecialcase_ = 'specialize:argtype(1)' - push_arg.oopspec = 'libffi_push_arg(self, value)' + #push_arg.oopspec = 'libffi_push_arg(self, value)' def _check_args(self): if self.pushed_args < self.argnum: @@ -524,7 +524,7 @@ def _clean_args(self): self.pushed_args = 0 - def call(self, funcsym, RES_TP): + def call(self, RES_TP): self._check_args() ffires = 
c_ffi_call(self.ll_cif, self.funcsym, rffi.cast(rffi.VOIDP, self.ll_result), @@ -537,8 +537,8 @@ self._clean_args() check_fficall_result(ffires, self.flags) return res - call._annspecialcase_ = 'specialize:arg(2)' - call.oopspec = 'libffi_call(self, funcsym, RES_TP)' + call._annspecialcase_ = 'specialize:arg(1)' + #call.oopspec = 'libffi_call(self, RES_TP)' def __del__(self): if self.ll_args: Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Wed Sep 29 16:47:50 2010 @@ -13,8 +13,8 @@ def __init__(self, intval): self.intval = intval - def push(self, funcptr): - funcptr.push_arg(self.intval) + def push(self, func): + func._push_arg(self.intval) class FloatArg(AbstractArg): """ An argument holding a float @@ -23,28 +23,45 @@ def __init__(self, floatval): self.floatval = floatval - def push(self, funcptr): - funcptr.push_arg(self.floatval) + def push(self, func): + func._push_arg(self.floatval) class Func(object): - _immutable_ = True + _immutable_fields_ = ['funcptr', 'funcsym', 'argtypes', 'restype'] def __init__(self, funcptr): # XXX: for now, this is just a wrapper around clibffi.FuncPtr, but in # the future it will replace it completely self.funcptr = funcptr + self.funcsym = funcptr.funcsym + self.argtypes = funcptr.argtypes + self.restype = funcptr.restype - def _prepare(self, funcsym, argtypes, restype): + def _prepare(self): pass - _prepare.oopspec = 'libffi_prepare_call(self, funcsym, argtypes, restype)' + _prepare.oopspec = 'libffi_prepare_call(self)' + + def _push_arg(self, value): + self.funcptr.push_arg(value) + # XXX this is bad, fix it somehow in the future, but specialize:argtype + # doesn't work correctly with mixing non-negative and normal integers + _push_arg._annenforceargs_ = [None, int] + #push_arg._annspecialcase_ = 'specialize:argtype(1)' + _push_arg.oopspec = 'libffi_push_arg(self, value)' + + def _do_call(self, funcsym, RESULT): + return self.funcptr.call(RESULT) + _do_call._annspecialcase_ = 'specialize:arg(1)' + _do_call.oopspec = 'libffi_call(self, funcsym, RESULT)' @jit.unroll_safe @specialize.arg(2) def call(self, argchain, RESULT): - self._prepare(self.funcptr.funcsym, self.funcptr.argtypes, self.funcptr.restype) + self._prepare() arg = argchain while arg: - arg.push(self.funcptr) + arg.push(self) arg = arg.next - return self.funcptr.call(self.funcptr.funcsym, RESULT) + #return self.funcptr.call(RESULT) + return self._do_call(self.funcsym, RESULT) From fijal at codespeak.net Wed Sep 29 17:12:30 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 29 Sep 2010 17:12:30 +0200 (CEST) Subject: [pypy-svn] r77469 - pypy/trunk/pypy/jit/codewriter Message-ID: <20100929151230.A3773282B9D@codespeak.net> Author: fijal Date: Wed Sep 29 17:12:29 2010 New Revision: 77469 Modified: pypy/trunk/pypy/jit/codewriter/effectinfo.py Log: Fix translation in tests Modified: pypy/trunk/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/trunk/pypy/jit/codewriter/effectinfo.py Wed Sep 29 17:12:29 2010 @@ -129,14 +129,17 @@ # ____________________________________________________________ -_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)} +_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)} def callinfo_for_oopspec(oopspecindex): """A 
function that returns the calldescr and the function address (as an int) of one of the OS_XYZ functions defined above. Don't use this if there might be several implementations of the same OS_XYZ specialized by type, e.g. OS_ARRAYCOPY.""" - return _callinfo_for_oopspec[oopspecindex] + try: + return _callinfo_for_oopspec[oopspecindex] + except KeyError: + return None def _funcptr_for_oopspec_memo(oopspecindex): From fijal at codespeak.net Wed Sep 29 17:23:51 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Wed, 29 Sep 2010 17:23:51 +0200 (CEST) Subject: [pypy-svn] r77470 - in pypy/trunk/pypy: jit/metainterp jit/metainterp/test rlib Message-ID: <20100929152351.102E8282B9D@codespeak.net> Author: fijal Date: Wed Sep 29 17:23:49 2010 New Revision: 77470 Added: pypy/trunk/pypy/jit/metainterp/optimize_nopspec.py pypy/trunk/pypy/jit/metainterp/test/test_loop_nopspec.py Modified: pypy/trunk/pypy/jit/metainterp/optimizefindnode.py pypy/trunk/pypy/jit/metainterp/warmstate.py pypy/trunk/pypy/rlib/jit.py Log: Have an optimization level which is normal, but does not perform perfect spec. Added: pypy/trunk/pypy/jit/metainterp/optimize_nopspec.py ============================================================================== --- (empty file) +++ pypy/trunk/pypy/jit/metainterp/optimize_nopspec.py Wed Sep 29 17:23:49 2010 @@ -0,0 +1,41 @@ + +from pypy.rlib.debug import debug_start, debug_stop +from pypy.jit.metainterp.optimizeopt import optimize_loop_1, optimize_bridge_1 +from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder +from pypy.jit.metainterp.optimizefindnode import BridgeSpecializationFinder + +def optimize_loop(metainterp_sd, old_loop_tokens, loop): + debug_start("jit-optimize") + try: + return _optimize_loop(metainterp_sd, old_loop_tokens, loop) + finally: + debug_stop("jit-optimize") + +def _optimize_loop(metainterp_sd, old_loop_tokens, loop): + cpu = metainterp_sd.cpu + metainterp_sd.logger_noopt.log_loop(loop.inputargs, loop.operations) + finder = PerfectSpecializationFinder(cpu) + finder.find_nodes_loop(loop, False) + if old_loop_tokens: + return old_loop_tokens[0] + optimize_loop_1(metainterp_sd, loop) + return None + +def optimize_bridge(metainterp_sd, old_loop_tokens, bridge): + debug_start("jit-optimize") + try: + return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge) + finally: + debug_stop("jit-optimize") + +def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge): + cpu = metainterp_sd.cpu + metainterp_sd.logger_noopt.log_loop(bridge.inputargs, bridge.operations) + finder = BridgeSpecializationFinder(cpu) + finder.find_nodes_bridge(bridge) + if old_loop_tokens: + old_loop_token = old_loop_tokens[0] + bridge.operations[-1].setdescr(old_loop_token) # patch jump target + optimize_bridge_1(metainterp_sd, bridge) + return old_loop_token + return None Modified: pypy/trunk/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizefindnode.py Wed Sep 29 17:23:49 2010 @@ -319,11 +319,12 @@ class PerfectSpecializationFinder(NodeFinder): node_fromstart = InstanceNode(fromstart=True) - def find_nodes_loop(self, loop): + def find_nodes_loop(self, loop, build_specnodes=True): self._loop = loop self.setup_input_nodes(loop.inputargs) self.find_nodes(loop.operations) - self.build_result_specnodes(loop) + if build_specnodes: + self.build_result_specnodes(loop) def show(self): from 
pypy.jit.metainterp.viewnode import viewnodes, view Added: pypy/trunk/pypy/jit/metainterp/test/test_loop_nopspec.py ============================================================================== --- (empty file) +++ pypy/trunk/pypy/jit/metainterp/test/test_loop_nopspec.py Wed Sep 29 17:23:49 2010 @@ -0,0 +1,27 @@ + +from pypy.jit.metainterp.test import test_loop, test_send +from pypy.jit.metainterp.warmspot import ll_meta_interp +from pypy.rlib.jit import OPTIMIZER_NO_PERFECTSPEC +from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin + +class LoopNoPSpecTest(test_send.SendTests): + def meta_interp(self, func, args, **kwds): + return ll_meta_interp(func, args, optimizer=OPTIMIZER_NO_PERFECTSPEC, + CPUClass=self.CPUClass, + type_system=self.type_system, + **kwds) + + def check_loops(self, *args, **kwds): + pass + + def check_loop_count(self, count): + pass + + def check_jumps(self, maxcount): + pass + +class TestLLtype(LoopNoPSpecTest, LLJitMixin): + pass + +class TestOOtype(LoopNoPSpecTest, OOJitMixin): + pass Modified: pypy/trunk/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/warmstate.py (original) +++ pypy/trunk/pypy/jit/metainterp/warmstate.py Wed Sep 29 17:23:49 2010 @@ -7,7 +7,8 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.jit import PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL +from pypy.rlib.jit import (PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL, + OPTIMIZER_NO_PERFECTSPEC) from pypy.rlib.jit import DEBUG_PROFILE from pypy.rlib.jit import BaseJitCell from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -190,6 +191,10 @@ from pypy.jit.metainterp import simple_optimize self.optimize_loop = simple_optimize.optimize_loop self.optimize_bridge = simple_optimize.optimize_bridge + elif optimizer == OPTIMIZER_NO_PERFECTSPEC: + from pypy.jit.metainterp import optimize_nopspec + self.optimize_loop = optimize_nopspec.optimize_loop + self.optimize_bridge = optimize_nopspec.optimize_bridge elif optimizer == OPTIMIZER_FULL: from pypy.jit.metainterp import optimize self.optimize_loop = optimize.optimize_loop Modified: pypy/trunk/pypy/rlib/jit.py ============================================================================== --- pypy/trunk/pypy/rlib/jit.py (original) +++ pypy/trunk/pypy/rlib/jit.py Wed Sep 29 17:23:49 2010 @@ -224,7 +224,8 @@ """Inconsistency in the JIT hints.""" OPTIMIZER_SIMPLE = 0 -OPTIMIZER_FULL = 1 +OPTIMIZER_NO_PERFECTSPEC = 1 +OPTIMIZER_FULL = 2 DEBUG_OFF = 0 DEBUG_PROFILE = 1 From afa at codespeak.net Wed Sep 29 18:53:55 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 29 Sep 2010 18:53:55 +0200 (CEST) Subject: [pypy-svn] r77471 - in pypy/branch/fast-forward/pypy/objspace/std: . test Message-ID: <20100929165355.8D01A282BD6@codespeak.net> Author: afa Date: Wed Sep 29 18:53:53 2010 New Revision: 77471 Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py pypy/branch/fast-forward/pypy/objspace/std/model.py pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Log: Reuse string multimethods for the bytearray object. Delegation (bytearray->str) works nicely, except that some functions still have to return a bytearray. 
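(A rough illustration of the delegation pattern this log describes, using plain Python with simplified, made-up names rather than the actual interp-level classes from the diff below: comparison methods can reuse the str implementation directly and return the result as-is, while methods such as zfill() must wrap the resulting str back into a bytearray-like object.)

    # Minimal sketch of bytearray->str delegation; FakeBytearray and
    # _as_str are illustrative stand-ins, not PyPy's real objects.
    class FakeBytearray(object):
        def __init__(self, data):
            self.data = list(data)          # stored as a list of one-char strings

        def _as_str(self):                  # cf. delegate_Bytearray2String
            return ''.join(self.data)

        def __eq__(self, other):            # delegate to str and return the bool
            if isinstance(other, str):
                return self._as_str() == other
            return NotImplemented

        def zfill(self, width):             # delegate, then re-wrap the str result
            return FakeBytearray(self._as_str().zfill(width))

    b = FakeBytearray('42')
    assert b == '42'
    assert isinstance(b.zfill(5), FakeBytearray)
    assert b.zfill(5)._as_str() == '00042'
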
Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Wed Sep 29 18:53:53 2010 @@ -7,6 +7,7 @@ from pypy.rlib.rstring import StringBuilder from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.stringobject import W_StringObject +from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype from pypy.interpreter import gateway @@ -101,14 +102,30 @@ return space.w_False return space.w_True -def eq__Bytearray_String(space, w_bytearray1, w_string2): - data1 = w_bytearray1.data - data2 = w_string2._value - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False +# bytearray-to-string delegation +def delegate_Bytearray2String(space, w_bytearray): + return W_StringObject(''.join(w_bytearray.data)) + +def String2Bytearray(space, w_str): + data = [c for c in space.str_w(w_str)] + return W_BytearrayObject(data) + +def eq__Bytearray_String(space, w_bytearray, w_other): + return space.eq(delegate_Bytearray2String(space, w_bytearray), w_other) + +def eq__Bytearray_Unicode(space, w_bytearray, w_other): + return space.w_False + +def eq__Unicode_Bytearray(space, w_other, w_bytearray): + return space.w_False + +def ne__Bytearray_String(space, w_bytearray, w_other): + return space.ne(delegate_Bytearray2String(space, w_bytearray), w_other) + +def ne__Bytearray_Unicode(space, w_bytearray, w_other): + return space.w_True + +def ne__Unicode_Bytearray(space, w_other, w_bytearray): return space.w_True def _min(a, b): @@ -171,19 +188,10 @@ return space.wrap(buf.build()) -def bytearray_count__Bytearray_Int(space, w_bytearray, w_char): - char = w_char.intval - count = 0 - for c in w_bytearray.data: - if ord(c) == char: - count += 1 - return space.wrap(count) - -def bytearray_index__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - char = w_char.intval +def _convert_idx_params(space, w_self, w_start, w_stop): start = slicetype._Eval_SliceIndex(space, w_start) stop = slicetype._Eval_SliceIndex(space, w_stop) - length = len(w_bytearray.data) + length = len(w_self.data) if start < 0: start += length if start < 0: @@ -192,6 +200,21 @@ stop += length if stop < 0: stop = 0 + return start, stop, length + +def str_count__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): + char = w_char.intval + start, stop, length = _convert_idx_params(space, w_bytearray, w_start, w_stop) + count = 0 + for i in range(start, min(stop, length)): + c = w_bytearray.data[i] + if ord(c) == char: + count += 1 + return space.wrap(count) + +def str_index__Bytearray_Int_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): + char = w_char.intval + start, stop, length = _convert_idx_params(space, w_bytearray, w_start, w_stop) for i in range(start, min(stop, length)): c = w_bytearray.data[i] if ord(c) == char: @@ -199,5 +222,17 @@ raise OperationError(space.w_ValueError, space.wrap("bytearray.index(x): x not in bytearray")) +# These methods could just delegate to the string implementation, +# but they have to return a bytearray. 
+def str_zfill__Bytearray_ANY(space, w_bytearray, w_width): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "zfill", w_width) + return String2Bytearray(space, w_res) + +def str_expandtabs__Bytearray_ANY(space, w_bytearray, w_tabsize): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "expandtabs", w_tabsize) + return String2Bytearray(space, w_res) + from pypy.objspace.std import bytearraytype register_all(vars(), bytearraytype) Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py Wed Sep 29 18:53:53 2010 @@ -4,17 +4,10 @@ from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM, no_hash_descr - -bytearray_count = SMM( - "count", 2, - doc="B.count(sub [,start [,end]]) -> int\n" - "Return the number of non-overlapping occurrences of subsection sub in\n" - "bytes B[start:end]. Optional arguments start and end are interpreted\n" - "as in slice notation.") - -bytearray_index = SMM("index", 4, defaults=(0, sys.maxint), - doc="index(obj, [start, [stop]]) -> first index that obj " - "appears in the bytearray") +from pypy.objspace.std.stringtype import str_islower, str_isupper +from pypy.objspace.std.stringtype import str_count, str_index +from pypy.objspace.std.stringtype import str_expandtabs, str_zfill +from pypy.objspace.std.stringtype import str_splitlines @gateway.unwrap_spec(ObjSpace, W_Root, W_Root, W_Root, W_Root) def descr__new__(space, w_bytearraytype, Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/model.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/model.py Wed Sep 29 18:53:53 2010 @@ -210,6 +210,9 @@ self.typeorder[ropeobject.W_RopeObject] += [ (unicodeobject.W_UnicodeObject, unicodeobject.delegate_String2Unicode), ] + self.typeorder[bytearrayobject.W_BytearrayObject] += [ + (stringobject.W_StringObject, bytearrayobject.delegate_Bytearray2String), + ] if config.objspace.std.withstrslice: self.typeorder[strsliceobject.W_StringSliceObject] += [ Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Wed Sep 29 18:53:53 2010 @@ -42,9 +42,34 @@ assert bytearray('world') > bytearray('hello') def test_compare_str(self): - assert bytearray('hello') == 'hello' - assert 'hello' == bytearray('hello') + assert bytearray('hello1') == 'hello1' + assert not (bytearray('hello1') != 'hello1') + assert 'hello2' == bytearray('hello2') + assert not ('hello1' != bytearray('hello1')) # unicode is always different - assert bytearray('hello') != u'hello' - assert u'hello' != bytearray('hello') + assert not (bytearray('hello3') == unicode('world')) + assert bytearray('hello3') != unicode('hello3') + assert unicode('hello3') != bytearray('world') + assert unicode('hello4') != bytearray('hello4') + assert not (bytearray('') == u'') + assert not (u'' == bytearray('')) + assert bytearray('') != u'' + assert u'' != bytearray('') + + def 
test_stringlike_operations(self): + assert bytearray('hello').islower() + assert bytearray('HELLO').isupper() + + assert bytearray('hello').count('l') == 2 + assert bytearray('hello').count(bytearray('l')) == 2 + assert bytearray('hello').count(ord('l')) == 2 + + assert bytearray('hello').index('e') == 1 + assert bytearray('hello').count(bytearray('e')) == 1 + assert bytearray('hello').index(ord('e')) == 1 + + r = bytearray('1').zfill(5) + assert type(r) is bytearray and r == '00001' + r = bytearray('1\t2').expandtabs(5) + assert type(r) is bytearray and r == '1 2' From agaynor at codespeak.net Wed Sep 29 19:09:59 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Wed, 29 Sep 2010 19:09:59 +0200 (CEST) Subject: [pypy-svn] r77472 - in pypy/trunk/pypy: jit/codewriter module/array/benchmark Message-ID: <20100929170959.5C92C282BE7@codespeak.net> Author: agaynor Date: Wed Sep 29 19:09:57 2010 New Revision: 77472 Modified: pypy/trunk/pypy/jit/codewriter/effectinfo.py pypy/trunk/pypy/module/array/benchmark/sumtst.c Log: Fix translation, hopefully. Modified: pypy/trunk/pypy/jit/codewriter/effectinfo.py ============================================================================== --- pypy/trunk/pypy/jit/codewriter/effectinfo.py (original) +++ pypy/trunk/pypy/jit/codewriter/effectinfo.py Wed Sep 29 19:09:57 2010 @@ -139,12 +139,12 @@ try: return _callinfo_for_oopspec[oopspecindex] except KeyError: - return None + return (None, 0) def _funcptr_for_oopspec_memo(oopspecindex): from pypy.jit.codewriter import heaptracker - _, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0)) + _, func_as_int = callinfo_for_oopspec(oopspecindex) funcadr = heaptracker.int2adr(func_as_int) return funcadr.ptr _funcptr_for_oopspec_memo._annspecialcase_ = 'specialize:memo' Modified: pypy/trunk/pypy/module/array/benchmark/sumtst.c ============================================================================== --- pypy/trunk/pypy/module/array/benchmark/sumtst.c (original) +++ pypy/trunk/pypy/module/array/benchmark/sumtst.c Wed Sep 29 19:09:57 2010 @@ -1,3 +1,4 @@ +#include double sum(double *img); @@ -5,4 +6,4 @@ double *img=malloc(640*480*4*sizeof(double)); int sa=0; for (int l=0; l<500; l++) sum(img); -} +} From hakanardo at codespeak.net Wed Sep 29 19:36:46 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Wed, 29 Sep 2010 19:36:46 +0200 (CEST) Subject: [pypy-svn] r77473 - in pypy/branch/jit-loop-invaraints/pypy/jit/metainterp: . 
optimizeopt test Message-ID: <20100929173646.C4AB5282BD6@codespeak.net> Author: hakanardo Date: Wed Sep 29 19:36:45 2010 New Revision: 77473 Added: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py.try1 - copied unchanged from r77374, pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resume.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py Log: Made mess of trying to pass data from a loop to its bridges using VInvariantOpInfo :( Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/invariant.py Wed Sep 29 19:36:45 2010 @@ -3,7 +3,24 @@ from pypy.jit.metainterp.compile import prebuiltNotSpecNode from pypy.rlib.debug import debug_print from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.optimizeopt.virtualize import AbstractVirtualValue +class VInvariantOpValue(AbstractVirtualValue): + def setup(self, op): + self.op = op + v = self.optimizer.getvalue(self.keybox) + self.intbound = v.intbound + + def get_args_for_fail(self, modifier): + if self.box is None and not modifier.already_seen_virtual(self.keybox): + modifier.register_virtual_fields(self.keybox, [self.keybox]) + + def _make_virtual(self, modifier): + return modifier.make_vinvop(self.op) + + def force_box(self): + return self.keybox + class OptInvariant(Optimization): """Move loop invariant code into a preamble. 
""" @@ -74,6 +91,11 @@ box = v.force_box() if box and box not in self.invariant_boxes: self.invariant_boxes.append(box) + box = op.result + vvalue = VInvariantOpValue(self.optimizer, box, op) + vvalue.setup(op) + del self.optimizer.values[box] + self.make_equal_to(box, vvalue) return Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 29 19:36:45 2010 @@ -354,6 +354,7 @@ assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) newboxes = modifier.finish(self.values, self.pendingfields) + debug_print('\ndescr:', descr) if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here compile.giveup() descr.store_final_boxes(op, newboxes) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/resume.py Wed Sep 29 19:36:45 2010 @@ -244,6 +244,9 @@ self.storage = storage self.memo = memo + def make_vinvop(self, op): + return VInvariantOpInfo(op) + def make_virtual(self, known_class, fielddescrs): return VirtualInfo(known_class, fielddescrs) @@ -313,6 +316,8 @@ storage.rd_consts = self.memo.consts dump_storage(storage, liveboxes) + debug_print('liveboxes:', liveboxes) + debug_print('liveboxes:', [l.__class__ for l in liveboxes]) return liveboxes[:] def _number_virtuals(self, liveboxes, values, num_env_virtuals): @@ -409,6 +414,11 @@ def debug_prints(self): raise NotImplementedError +class VFromStartInfo(AbstractVirtualInfo): + def __init__(self, box, idx=-1): + self.set_content([fieldnums]) + self.index = idx + class AbstractVirtualStructInfo(AbstractVirtualInfo): def __init__(self, fielddescrs): self.fielddescrs = fielddescrs @@ -440,6 +450,23 @@ debug_print("\tvirtualinfo", self.known_class.repr_rpython()) AbstractVirtualStructInfo.debug_prints(self) +class VInvariantOpInfo(AbstractVirtualInfo): + def __init__(self, op): + self.op = op + + @specialize.argtype(1) + def allocate(self, decoder): + #return decoder.allocate_op(self.op) + #raise + return None + + def debug_prints(self): + debug_print("\tVInvariantOpInfo", self.op) + + @specialize.argtype(1) + def setfields(self, decoder, array): + pass + class VStructInfo(AbstractVirtualStructInfo): def __init__(self, typedescr, fielddescrs): AbstractVirtualStructInfo.__init__(self, fielddescrs) @@ -536,6 +563,7 @@ self.cur_numb = self.cur_numb.prev def _callback_i(self, index, register_index): + print "I: ", self.cur_numb.nums[index] value = self.decode_int(self.cur_numb.nums[index]) self.write_an_int(register_index, value) @@ -614,6 +642,9 @@ def allocate_with_vtable(self, known_class): return self.metainterp.execute_and_record(rop.NEW_WITH_VTABLE, None, known_class) + #def allocate_op(self, op): + # return self.metainterp.execute_and_record_varargs( + # op.opnum, op.args, op.descr) def allocate_struct(self, typedescr): return self.metainterp.execute_and_record(rop.NEW, typedescr) Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- 
pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_basic.py Wed Sep 29 19:36:45 2010 @@ -356,7 +356,7 @@ assert res == 919 self.check_loop_count(3) - def test_loop_invariant_mul_bridge_maintaining(self): + def test_loop_invariant_mul_bridge_maintaining1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): res = 0 @@ -376,6 +376,27 @@ 'int_sub': 2, 'int_mul': 2, 'int_add': 2, 'jump': 3}) + def test_loop_invariant_mul_bridge_maintaining2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + z = x * x + res += z + if y<8: + res += z + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 828 + self.check_loop_count(3) + self.check_loops({'int_lt': 1, 'int_gt': 1, + 'guard_false': 1, 'guard_true': 1, + 'int_sub': 2, 'int_mul': 2, 'int_add': 2, + 'jump': 3}) + def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) class I: Modified: pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py ============================================================================== --- pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py (original) +++ pypy/branch/jit-loop-invaraints/pypy/jit/metainterp/test/test_virtual.py Wed Sep 29 19:36:45 2010 @@ -12,7 +12,7 @@ def _freeze_(self): return True - def test_virtualized(self): + def test_virtualized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) def f(n): node = self._new() From agaynor at codespeak.net Wed Sep 29 23:48:03 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Wed, 29 Sep 2010 23:48:03 +0200 (CEST) Subject: [pypy-svn] r77474 - pypy/trunk/pypy/rlib Message-ID: <20100929214803.03706282BD4@codespeak.net> Author: agaynor Date: Wed Sep 29 23:48:00 2010 New Revision: 77474 Modified: pypy/trunk/pypy/rlib/rstring.py Log: When doing str * big_int throw a MemoryError early if the resulting string's length is greater than sys.maxint. 
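The guard added below amounts to checking the multiplication for overflow before any
memory is allocated. A rough standalone equivalent in plain Python 2 follows;
repeat_length_or_raise is a hypothetical name used only for illustration and is not part
of the patch:

    import sys

    def repeat_length_or_raise(s, mul):
        # Refuse early if len(s) * mul cannot fit in a native signed integer;
        # this mirrors the ovfcheck() guard added in the diff below.
        assert mul > 0
        if len(s) > sys.maxint // mul:
            raise MemoryError
        return len(s) * mul

    # e.g. repeat_length_or_raise("abc", sys.maxint) raises MemoryError here.
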
Modified: pypy/trunk/pypy/rlib/rstring.py ============================================================================== --- pypy/trunk/pypy/rlib/rstring.py (original) +++ pypy/trunk/pypy/rlib/rstring.py Wed Sep 29 23:48:00 2010 @@ -1,9 +1,10 @@ """ String builder interface and string functions """ -from pypy.rpython.extregistry import ExtRegistryEntry from pypy.annotation.model import SomeObject, SomeString, s_None,\ SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString +from pypy.rlib.rarithmetic import ovfcheck +from pypy.rpython.extregistry import ExtRegistryEntry # -------------- public API for string functions ----------------------- @@ -78,6 +79,11 @@ result = None factor = 1 assert mul > 0 + try: + ovfcheck(len(s) * mul) + except OverflowError: + raise MemoryError + limit = mul >> 1 while True: if mul & factor: From afa at codespeak.net Wed Sep 29 23:50:11 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Wed, 29 Sep 2010 23:50:11 +0200 (CEST) Subject: [pypy-svn] r77475 - pypy/branch/fast-forward/pypy/objspace/std/test Message-ID: <20100929215011.D1CCC282BD4@codespeak.net> Author: afa Date: Wed Sep 29 23:50:10 2010 New Revision: 77475 Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Log: Add basic comparison test Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Wed Sep 29 23:50:10 2010 @@ -38,6 +38,7 @@ assert list(bytearray('hello')) == [104, 101, 108, 108, 111] def test_compare(self): + assert bytearray('hello') == bytearray('hello') assert bytearray('hello') < bytearray('world') assert bytearray('world') > bytearray('hello') @@ -73,3 +74,4 @@ r = bytearray('1\t2').expandtabs(5) assert type(r) is bytearray and r == '1 2' + From agaynor at codespeak.net Wed Sep 29 23:51:31 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Wed, 29 Sep 2010 23:51:31 +0200 (CEST) Subject: [pypy-svn] r77476 - pypy/trunk/pypy/rlib/test Message-ID: <20100929215131.1CC17282BD4@codespeak.net> Author: agaynor Date: Wed Sep 29 23:51:29 2010 New Revision: 77476 Modified: pypy/trunk/pypy/rlib/test/test_rstring.py Log: Added a test for r77474. Modified: pypy/trunk/pypy/rlib/test/test_rstring.py ============================================================================== --- pypy/trunk/pypy/rlib/test/test_rstring.py (original) +++ pypy/trunk/pypy/rlib/test/test_rstring.py Wed Sep 29 23:51:29 2010 @@ -1,5 +1,8 @@ +import sys + +from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit, \ + string_repeat -from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit def test_split(): assert split("", 'x') == [''] @@ -39,3 +42,6 @@ s.append_multiple_char('d', 4) assert s.build() == 'aabcbdddd' assert isinstance(s.build(), unicode) + +def test_string_repeat(): + raises(MemoryError, string_repeat, "abc", sys.maxint) From afa at codespeak.net Thu Sep 30 00:16:27 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 30 Sep 2010 00:16:27 +0200 (CEST) Subject: [pypy-svn] r77477 - in pypy/branch/fast-forward: . 
lib-python/modified-2.5.2 lib-python/modified-2.7.0 pypy/config pypy/config/test pypy/interpreter pypy/jit/backend/cli pypy/jit/backend/llgraph pypy/jit/backend/llsupport pypy/jit/backend/llsupport/test pypy/jit/backend/llvm pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/codewriter pypy/jit/codewriter/test pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/jit/tool pypy/module/array/benchmark pypy/module/pypyjit/test pypy/objspace/std pypy/rlib pypy/rlib/test pypy/rpython pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/gctransform pypy/rpython/memory/test pypy/tool pypy/translator/c pypy/translator/c/gcc pypy/translator/c/test Message-ID: <20100929221627.92416282B9E@codespeak.net> Author: afa Date: Thu Sep 30 00:16:20 2010 New Revision: 77477 Added: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_string.py - copied unchanged from r77476, pypy/trunk/pypy/jit/backend/x86/test/test_string.py pypy/branch/fast-forward/pypy/jit/metainterp/optimize_nopspec.py - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/optimize_nopspec.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/string.py - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/optimizeopt/string.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_nopspec.py - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/test/test_loop_nopspec.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resoperation.py - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py pypy/branch/fast-forward/pypy/rpython/memory/gc/inspector.py - copied unchanged from r77476, pypy/trunk/pypy/rpython/memory/gc/inspector.py Removed: pypy/branch/fast-forward/pypy/rpython/memory/gc/inspect.py Modified: pypy/branch/fast-forward/ (props changed) pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py pypy/branch/fast-forward/pypy/config/translationoption.py pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py pypy/branch/fast-forward/pypy/interpreter/pyopcode.py pypy/branch/fast-forward/pypy/jit/backend/cli/method.py pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py 
pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py pypy/branch/fast-forward/pypy/jit/codewriter/call.py pypy/branch/fast-forward/pypy/jit/codewriter/codewriter.py pypy/branch/fast-forward/pypy/jit/codewriter/effectinfo.py pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py pypy/branch/fast-forward/pypy/jit/codewriter/support.py pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py pypy/branch/fast-forward/pypy/jit/metainterp/compile.py pypy/branch/fast-forward/pypy/jit/metainterp/executor.py pypy/branch/fast-forward/pypy/jit/metainterp/gc.py pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py pypy/branch/fast-forward/pypy/jit/metainterp/history.py pypy/branch/fast-forward/pypy/jit/metainterp/logger.py pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py pypy/branch/fast-forward/pypy/jit/metainterp/resume.py pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py pypy/branch/fast-forward/pypy/jit/tool/showstats.py pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py pypy/branch/fast-forward/pypy/objspace/std/objspace.py pypy/branch/fast-forward/pypy/objspace/std/stringtype.py pypy/branch/fast-forward/pypy/rlib/jit.py pypy/branch/fast-forward/pypy/rlib/rmmap.py pypy/branch/fast-forward/pypy/rlib/rstring.py pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py pypy/branch/fast-forward/pypy/rpython/annlowlevel.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py 
pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py pypy/branch/fast-forward/pypy/tool/progressbar.py pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py pypy/branch/fast-forward/pypy/translator/c/genc.py pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py Log: Merge from trunk Modified: pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py Thu Sep 30 00:16:20 2010 @@ -185,6 +185,7 @@ # pypy modification, experimental bytecode def_op('CALL_LIKELY_BUILTIN', 144) # #args + (#kwargs << 8) def_op('LOOKUP_METHOD', 145) # Index in name list +hasname.append(145) def_op('CALL_METHOD', 146) # #args not including 'self' Modified: pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py ============================================================================== --- pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py Thu Sep 30 00:16:20 2010 @@ -1,4 +1,3 @@ - """ opcode module - potentially shared between dis and other modules which operate on bytecodes (e.g. peephole optimizers). 
@@ -192,6 +191,7 @@ # pypy modification, experimental bytecode def_op('CALL_LIKELY_BUILTIN', 200) # #args + (#kwargs << 8) def_op('LOOKUP_METHOD', 201) # Index in name list +hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' del def_op, name_op, jrel_op, jabs_op Modified: pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py (original) +++ pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py Thu Sep 30 00:16:20 2010 @@ -41,7 +41,7 @@ assert not conf.translation.backendopt.none conf = get_pypy_config() set_opt_level(conf, 'mem') - assert conf.translation.gc == 'markcompact' + assert conf.translation.gcremovetypeptr assert not conf.translation.backendopt.none def test_set_pypy_opt_level(): Modified: pypy/branch/fast-forward/pypy/config/translationoption.py ============================================================================== --- pypy/branch/fast-forward/pypy/config/translationoption.py (original) +++ pypy/branch/fast-forward/pypy/config/translationoption.py Thu Sep 30 00:16:20 2010 @@ -11,6 +11,8 @@ DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4 DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0 +DEFL_GC = "minimark" + IS_64_BITS = sys.maxint > 2147483647 PLATFORMS = [ @@ -105,7 +107,7 @@ # JIT generation: use -Ojit to enable it BoolOption("jit", "generate a JIT", default=False, - suggests=[("translation.gc", "hybrid"), + suggests=[("translation.gc", DEFL_GC), ("translation.gcrootfinder", "asmgcc"), ("translation.list_comprehension_operations", True)]), ChoiceOption("jit_backend", "choose the backend for the JIT", @@ -337,10 +339,10 @@ '0': 'boehm nobackendopt', '1': 'boehm lowinline', 'size': 'boehm lowinline remove_asserts', - 'mem': 'markcompact lowinline remove_asserts removetypeptr', - '2': 'hybrid extraopts', - '3': 'hybrid extraopts remove_asserts', - 'jit': 'hybrid extraopts jit', + 'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr', + '2': DEFL_GC + ' extraopts', + '3': DEFL_GC + ' extraopts remove_asserts', + 'jit': DEFL_GC + ' extraopts jit', } def final_check_config(config): Modified: pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py Thu Sep 30 00:16:20 2010 @@ -12,6 +12,7 @@ from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint from pypy.rlib import jit +from pypy.tool.sourcetools import func_with_new_name import os, sys, py __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root'] @@ -749,12 +750,17 @@ (i, plural))) return items + unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, + 'unpackiterable_unroll')) + def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. Don't modify the result """ return make_sure_not_resized(self.unpackiterable(w_iterable, expected_length)[:]) + fixedview_unroll = fixedview + def listview(self, w_iterable, expected_length=-1): """ A non-fixed view of w_iterable. 
Don't modify the result """ Modified: pypy/branch/fast-forward/pypy/interpreter/pyopcode.py ============================================================================== --- pypy/branch/fast-forward/pypy/interpreter/pyopcode.py (original) +++ pypy/branch/fast-forward/pypy/interpreter/pyopcode.py Thu Sep 30 00:16:20 2010 @@ -637,7 +637,7 @@ def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() - items = self.space.fixedview(w_iterable, itemcount) + items = self.space.fixedview_unroll(w_iterable, itemcount) self.pushrevvalues(itemcount, items) def STORE_ATTR(self, nameindex, next_instr): Modified: pypy/branch/fast-forward/pypy/jit/backend/cli/method.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/cli/method.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/cli/method.py Thu Sep 30 00:16:20 2010 @@ -207,9 +207,9 @@ def _collect_types(self, operations, box2classes): for op in operations: - if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC): + if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): box = op.args[0] - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) box2classes.setdefault(box, []).append(descr.selfclass) if op in self.cliloop.guard2ops: @@ -335,7 +335,7 @@ while self.i < N: op = oplist[self.i] self.emit_debug(op.repr()) - func = self.operations[op.opnum] + func = self.operations[op.getopnum()] assert func is not None func(self, op) self.i += 1 @@ -357,10 +357,10 @@ assert op.is_guard() if op in self.cliloop.guard2ops: inputargs, suboperations = self.cliloop.guard2ops[op] - self.match_var_fox_boxes(op.fail_args, inputargs) + self.match_var_fox_boxes(op.getfailargs(), inputargs) self.emit_operations(suboperations) else: - self.emit_return_failed_op(op, op.fail_args) + self.emit_return_failed_op(op, op.getfailargs()) def emit_end(self): assert self.branches == [] @@ -410,7 +410,7 @@ def emit_ovf_op(self, op, emit_op): next_op = self.oplist[self.i+1] - if next_op.opnum == rop.GUARD_NO_OVERFLOW: + if next_op.getopnum() == rop.GUARD_NO_OVERFLOW: self.i += 1 self.emit_ovf_op_and_guard(op, next_op, emit_op) return @@ -544,7 +544,7 @@ self.emit_guard_overflow_impl(op, OpCodes.Brfalse) def emit_op_jump(self, op): - target_token = op.descr + target_token = op.getdescr() assert isinstance(target_token, LoopToken) if target_token.cliloop is self.cliloop: # jump to the beginning of the loop @@ -586,7 +586,7 @@ self.store_result(op) def emit_op_instanceof(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_clitype() op.args[0].load(self) @@ -604,7 +604,7 @@ self.store_result(op) def emit_op_call_impl(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.StaticMethDescr) delegate_type = descr.get_delegate_clitype() meth_invoke = descr.get_meth_info() @@ -619,7 +619,7 @@ emit_op_call_pure = emit_op_call def emit_op_oosend(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.MethDescr) clitype = descr.get_self_clitype() methinfo = descr.get_meth_info() @@ -639,7 +639,7 @@ self.store_result(op) def emit_op_getfield_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -653,7 +653,7 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_setfield_gc(self, op): - descr = op.descr + descr = 
op.getdescr() assert isinstance(descr, runner.FieldDescr) clitype = descr.get_self_clitype() fieldinfo = descr.get_field_info() @@ -665,7 +665,7 @@ self.il.Emit(OpCodes.Stfld, fieldinfo) def emit_op_getarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -678,7 +678,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_setarrayitem_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() itemtype = descr.get_clitype() @@ -689,7 +689,7 @@ self.il.Emit(OpCodes.Stelem, itemtype) def emit_op_arraylen_gc(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) clitype = descr.get_array_clitype() op.args[0].load(self) @@ -698,7 +698,7 @@ self.store_result(op) def emit_op_new_array(self, op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, runner.TypeDescr) item_clitype = descr.get_clitype() if item_clitype is None: Modified: pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py Thu Sep 30 00:16:20 2010 @@ -105,7 +105,7 @@ def _attach_token_to_faildescrs(self, token, operations): for op in operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) descr._loop_token = token descr._guard_op = op @@ -136,7 +136,7 @@ func = cliloop.funcbox.holder.GetFunc() func(self.get_inputargs()) op = self.failing_ops[self.inputargs.get_failed_op()] - return op.descr + return op.getdescr() def set_future_value_int(self, index, intvalue): self.get_inputargs().set_int(index, intvalue) Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py Thu Sep 30 00:16:20 2010 @@ -129,7 +129,7 @@ 'arraylen_gc' : (('ref',), 'int'), 'call' : (('ref', 'varargs'), 'intorptr'), 'call_assembler' : (('varargs',), 'intorptr'), - 'cond_call_gc_wb' : (('ptr', 'ptr'), None), + 'cond_call_gc_wb' : (('ptr',), None), 'oosend' : (('varargs',), 'intorptr'), 'oosend_pure' : (('varargs',), 'intorptr'), 'guard_true' : (('bool',), None), @@ -810,7 +810,7 @@ FLOAT: 0.0} return d[calldescr.typeinfo] - def op_cond_call_gc_wb(self, descr, a, b): + def op_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") def op_oosend(self, descr, obj, *args): @@ -1382,6 +1382,20 @@ uni = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) uni.chars[index] = unichr(newvalue) +def do_copystrcontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) + assert 0 <= srcstart <= srcstart + length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) + assert 0 <= srcstart <= srcstart + 
length <= len(src.chars) + assert 0 <= dststart <= dststart + length <= len(dst.chars) + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ---------- call ---------- _call_args_i = [] Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py Thu Sep 30 00:16:20 2010 @@ -151,16 +151,17 @@ def _compile_operations(self, c, operations, var2index): for op in operations: - llimpl.compile_add(c, op.opnum) - descr = op.descr + llimpl.compile_add(c, op.getopnum()) + descr = op.getdescr() if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) - if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP: + if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython - c._obj.externalobj.operations[-1].descr = descr - for x in op.args: + c._obj.externalobj.operations[-1].setdescr(descr) + for i in range(op.numargs()): + x = op.getarg(i) if isinstance(x, history.Box): llimpl.compile_add_var(c, var2index[x]) elif isinstance(x, history.ConstInt): @@ -173,10 +174,10 @@ raise Exception("'%s' args contain: %r" % (op.getopname(), x)) if op.is_guard(): - faildescr = op.descr + faildescr = op.getdescr() assert isinstance(faildescr, history.AbstractFailDescr) faildescr._fail_args_types = [] - for box in op.fail_args: + for box in op.getfailargs(): if box is None: type = history.HOLE else: @@ -185,7 +186,7 @@ fail_index = self.get_fail_descr_number(faildescr) index = llimpl.compile_add_fail(c, fail_index) faildescr._compiled_fail = c, index - for box in op.fail_args: + for box in op.getfailargs(): if box is not None: llimpl.compile_add_fail_arg(c, var2index[box]) else: @@ -203,13 +204,13 @@ x)) op = operations[-1] assert op.is_final() - if op.opnum == rop.JUMP: - targettoken = op.descr + if op.getopnum() == rop.JUMP: + targettoken = op.getdescr() assert isinstance(targettoken, history.LoopToken) compiled_version = targettoken._llgraph_compiled_version llimpl.compile_add_jump_target(c, compiled_version) - elif op.opnum == rop.FINISH: - faildescr = op.descr + elif op.getopnum() == rop.FINISH: + faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) llimpl.compile_add_fail(c, index) else: @@ -280,7 +281,7 @@ def __init__(self, *args, **kwds): BaseCPU.__init__(self, *args, **kwds) self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr') - + def fielddescrof(self, S, fieldname): ofs, size = symbolic.get_field_token(S, fieldname) token = history.getkind(getattr(S, fieldname)) @@ -504,7 +505,7 @@ return ootype.cast_to_object(e) else: return ootype.NULL - + def get_exc_value(self): if llimpl._last_exception: earg = llimpl._last_exception.args[1] @@ -580,7 +581,7 @@ x = descr.callmeth(selfbox, argboxes) # XXX: return None if METH.RESULT is Void return x - + def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) @@ -612,7 +613,7 @@ class KeyManager(object): """ Helper class to convert arbitrary dictionary keys to integers. 
- """ + """ def __init__(self): self.keys = {} @@ -695,7 +696,7 @@ self.ARRAY = ARRAY = ootype.Array(TYPE) def create(): return boxresult(TYPE, ootype.new(TYPE)) - + def create_array(lengthbox): n = lengthbox.getint() return boxresult(ARRAY, ootype.oonewarray(ARRAY, n)) @@ -757,7 +758,7 @@ obj = objbox.getref(TYPE) value = unwrap(T, valuebox) setattr(obj, fieldname, value) - + self.getfield = getfield self.setfield = setfield self._is_pointer_field = (history.getkind(T) == 'ref') Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py Thu Sep 30 00:16:20 2010 @@ -41,9 +41,12 @@ moving_gc = False gcrootmap = None - def __init__(self, gcdescr, translator, rtyper): - GcLLDescription.__init__(self, gcdescr, translator, rtyper) - # grab a pointer to the Boehm 'malloc' function + @classmethod + def configure_boehm_once(cls): + """ Configure boehm only once, since we don't cache failures + """ + if hasattr(cls, 'malloc_fn_ptr'): + return cls.malloc_fn_ptr from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() @@ -59,13 +62,20 @@ GC_MALLOC = "GC_local_malloc" else: GC_MALLOC = "GC_malloc" - malloc_fn_ptr = rffi.llexternal(GC_MALLOC, [lltype.Signed], # size_t, but good enough llmemory.GCREF, compilation_info=compilation_info, sandboxsafe=True, _nowrapper=True) + cls.malloc_fn_ptr = malloc_fn_ptr + cls.compilation_info = compilation_info + return malloc_fn_ptr + + def __init__(self, gcdescr, translator, rtyper): + GcLLDescription.__init__(self, gcdescr, translator, rtyper) + # grab a pointer to the Boehm 'malloc' function + malloc_fn_ptr = self.configure_boehm_once() self.funcptr_for_new = malloc_fn_ptr # on some platform GC_init is required before any other @@ -73,7 +83,7 @@ # XXX move this to tests init_fn_ptr = rffi.llexternal("GC_init", [], lltype.Void, - compilation_info=compilation_info, + compilation_info=self.compilation_info, sandboxsafe=True, _nowrapper=True) @@ -123,7 +133,7 @@ # ____________________________________________________________ -# All code below is for the hybrid GC +# All code below is for the hybrid or minimark GC class GcRefList: @@ -157,7 +167,7 @@ def alloc_gcref_list(self, n): # Important: the GRREF_LISTs allocated are *non-movable*. This - # requires support in the gc (only the hybrid GC supports it so far). + # requires support in the gc (hybrid GC or minimark GC so far). if we_are_translated(): list = rgc.malloc_nonmovable(self.GCREF_LIST, n) assert list, "malloc_nonmovable failed!" 
@@ -340,8 +350,9 @@ self.translator = translator self.llop1 = llop1 - # we need the hybrid GC for GcRefList.alloc_gcref_list() to work - if gcdescr.config.translation.gc != 'hybrid': + # we need the hybrid or minimark GC for GcRefList.alloc_gcref_list() + # to work + if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) @@ -372,8 +383,7 @@ self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) (self.array_basesize, _, self.array_length_ofs) = \ symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) - min_ns = self.GCClass.TRANSLATION_PARAMS['min_nursery_size'] - self.max_size_of_young_obj = self.GCClass.get_young_fixedsize(min_ns) + self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() # make a malloc function, with three arguments def malloc_basic(size, tid): @@ -394,7 +404,7 @@ self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( [lltype.Signed, lltype.Signed], llmemory.GCREF)) self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address, llmemory.Address], lltype.Void)) + [llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) # def malloc_array(itemsize, tid, num_elem): @@ -540,8 +550,7 @@ # the GC, and call it immediately llop1 = self.llop1 funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR) - funcptr(llmemory.cast_ptr_to_adr(gcref_struct), - llmemory.cast_ptr_to_adr(gcref_newptr)) + funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) def rewrite_assembler(self, cpu, operations): # Perform two kinds of rewrites in parallel: @@ -559,12 +568,12 @@ # newops = [] for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: continue # ---------- replace ConstPtrs with GETFIELD_RAW ---------- # xxx some performance issue here - for i in range(len(op.args)): - v = op.args[i] + for i in range(op.numargs()): + v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): addr = self.gcrefs.get_address_of_gcref(v.value) # ^^^even for non-movable objects, to record their presence @@ -574,30 +583,30 @@ newops.append(ResOperation(rop.GETFIELD_RAW, [ConstInt(addr)], box, self.single_gcref_descr)) - op.args[i] = box + op.setarg(i, box) # ---------- write barrier for SETFIELD_GC ---------- - if op.opnum == rop.SETFIELD_GC: - v = op.args[1] + if op.getopnum() == rop.SETFIELD_GC: + v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETFIELD_RAW, op.args, None, - descr=op.descr) + self._gen_write_barrier(newops, op.getarg(0)) + op = op.copy_and_change(rop.SETFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.opnum == rop.SETARRAYITEM_GC: - v = op.args[2] + if op.getopnum() == rop.SETARRAYITEM_GC: + v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.args[0], v) - op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None, - descr=op.descr) + # XXX detect when we should produce a + # write_barrier_from_array + self._gen_write_barrier(newops, op.getarg(0)) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- newops.append(op) del operations[:] operations.extend(newops) - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] + def _gen_write_barrier(self, newops, v_base): + args = [v_base] 
newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=self.write_barrier_descr)) Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py Thu Sep 30 00:16:20 2010 @@ -81,6 +81,10 @@ for v in vars: self.possibly_free_var(v) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + self.possibly_free_var(op.getarg(i)) + def _check_invariants(self): if not we_are_translated(): # make sure no duplicates Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 30 00:16:20 2010 @@ -141,19 +141,20 @@ repr(offset_to_length), p)) return p - def _write_barrier_failing_case(self, adr_struct, adr_newptr): - self.record.append(('barrier', adr_struct, adr_newptr)) + def _write_barrier_failing_case(self, adr_struct): + self.record.append(('barrier', adr_struct)) def get_write_barrier_failing_case(self, FPTRTYPE): return llhelper(FPTRTYPE, self._write_barrier_failing_case) class TestFramework: + gc = 'hybrid' def setup_method(self, meth): class config_: class translation: - gc = 'hybrid' + gc = self.gc gcrootfinder = 'asmgcc' gctransformer = 'framework' gcremovetypeptr = False @@ -238,7 +239,6 @@ s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) r_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, r) s_adr = llmemory.cast_ptr_to_adr(s) - r_adr = llmemory.cast_ptr_to_adr(r) # s_hdr.tid &= ~gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) @@ -246,7 +246,7 @@ # s_hdr.tid |= gc_ll_descr.GCClass.JIT_WB_IF_FLAG gc_ll_descr.do_write_barrier(s_gcref, r_gcref) - assert self.llop1.record == [('barrier', s_adr, r_adr)] + assert self.llop1.record == [('barrier', s_adr)] def test_gen_write_barrier(self): gc_ll_descr = self.gc_ll_descr @@ -254,22 +254,20 @@ # newops = [] v_base = BoxPtr() - v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + gc_ll_descr._gen_write_barrier(newops, v_base) assert llop1.record == [] assert len(newops) == 1 - assert newops[0].opnum == rop.COND_CALL_GC_WB - assert newops[0].args[0] == v_base - assert newops[0].args[1] == v_value + assert newops[0].getopnum() == rop.COND_CALL_GC_WB + assert newops[0].getarg(0) == v_base assert newops[0].result is None - wbdescr = newops[0].descr + wbdescr = newops[0].getdescr() assert isinstance(wbdescr.jit_wb_if_flag, int) assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) def test_get_rid_of_debug_merge_point(self): operations = [ - ResOperation(rop.DEBUG_MERGE_POINT, [], None), + ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None), ] gc_ll_descr = self.gc_ll_descr gc_ll_descr.rewrite_assembler(None, operations) @@ -298,13 +296,14 @@ gc_ll_descr.gcrefs = MyFakeGCRefList() gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations) assert len(operations) == 2 - assert operations[0].opnum == rop.GETFIELD_RAW - assert operations[0].args == [ConstInt(43)] - assert operations[0].descr == gc_ll_descr.single_gcref_descr + assert operations[0].getopnum() == rop.GETFIELD_RAW + assert operations[0].getarg(0) == ConstInt(43) + 
assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr v_box = operations[0].result assert isinstance(v_box, BoxPtr) - assert operations[1].opnum == rop.PTR_EQ - assert operations[1].args == [v_random_box, v_box] + assert operations[1].getopnum() == rop.PTR_EQ + assert operations[1].getarg(0) == v_random_box + assert operations[1].getarg(1) == v_box assert operations[1].result == v_result def test_rewrite_assembler_1_cannot_move(self): @@ -336,8 +335,9 @@ finally: rgc.can_move = old_can_move assert len(operations) == 1 - assert operations[0].opnum == rop.PTR_EQ - assert operations[0].args == [v_random_box, ConstPtr(s_gcref)] + assert operations[0].getopnum() == rop.PTR_EQ + assert operations[0].getarg(0) == v_random_box + assert operations[0].getarg(1) == ConstPtr(s_gcref) assert operations[0].result == v_result # check that s_gcref gets added to the list anyway, to make sure # that the GC sees it @@ -356,14 +356,14 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base assert operations[0].result is None # - assert operations[1].opnum == rop.SETFIELD_RAW - assert operations[1].args == [v_base, v_value] - assert operations[1].descr == field_descr + assert operations[1].getopnum() == rop.SETFIELD_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_value + assert operations[1].getdescr() == field_descr def test_rewrite_assembler_3(self): # check write barriers before SETARRAYITEM_GC @@ -379,11 +379,16 @@ gc_ll_descr.rewrite_assembler(self.fake_cpu, operations) assert len(operations) == 2 # - assert operations[0].opnum == rop.COND_CALL_GC_WB - assert operations[0].args[0] == v_base - assert operations[0].args[1] == v_value + assert operations[0].getopnum() == rop.COND_CALL_GC_WB + assert operations[0].getarg(0) == v_base assert operations[0].result is None # - assert operations[1].opnum == rop.SETARRAYITEM_RAW - assert operations[1].args == [v_base, v_index, v_value] - assert operations[1].descr == array_descr + assert operations[1].getopnum() == rop.SETARRAYITEM_RAW + assert operations[1].getarg(0) == v_base + assert operations[1].getarg(1) == v_index + assert operations[1].getarg(2) == v_value + assert operations[1].getdescr() == array_descr + + +class TestFrameworkMiniMark(TestFramework): + gc = 'minimark' Modified: pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py Thu Sep 30 00:16:20 2010 @@ -107,7 +107,7 @@ # store away the exception into self.backup_exc_xxx, *unless* the # branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION. if exc: - opnum = operations[0].opnum + opnum = operations[0].getopnum() if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): self._store_away_exception() # Normal handling of the operations follows. 
@@ -115,7 +115,7 @@ self._generate_op(op) def _generate_op(self, op): - opnum = op.opnum + opnum = op.getopnum() for i, name in all_operations: if opnum == i: meth = getattr(self, name) @@ -475,7 +475,7 @@ return location def generate_GETFIELD_GC(self, op): - loc = self._generate_field_gep(op.args[0], op.descr) + loc = self._generate_field_gep(op.args[0], op.getdescr()) self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") generate_GETFIELD_GC_PURE = generate_GETFIELD_GC @@ -483,7 +483,7 @@ generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC def generate_SETFIELD_GC(self, op): - fielddescr = op.descr + fielddescr = op.getdescr() loc = self._generate_field_gep(op.args[0], fielddescr) assert isinstance(fielddescr, FieldDescr) getarg = self.cpu.getarg_by_index[fielddescr.size_index] @@ -491,7 +491,7 @@ llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "") def generate_CALL(self, op): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr) v = op.args[0] @@ -579,7 +579,7 @@ self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "") def generate_ARRAYLEN_GC(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_len(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_length) @@ -598,7 +598,7 @@ return location def _generate_array_gep(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) location = self._generate_gep(op, arraydescr.ty_array_ptr, self.cpu.const_array_index_array) @@ -612,7 +612,7 @@ def generate_SETARRAYITEM_GC(self, op): loc = self._generate_array_gep(op) - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index] value_ref = getarg(self, op.args[2]) @@ -660,7 +660,7 @@ return res def generate_NEW(self, op): - sizedescr = op.descr + sizedescr = op.getdescr() assert isinstance(sizedescr, SizeDescr) res = self._generate_new(self.cpu._make_const_int(sizedescr.size)) self.vars[op.result] = res @@ -695,7 +695,7 @@ self.vars[op.result] = res def generate_NEW_ARRAY(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, ArrayDescr) self._generate_new_array(op, arraydescr.ty_array_ptr, self.cpu._make_const_int(arraydescr.itemsize), Modified: pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py Thu Sep 30 00:16:20 2010 @@ -1,5 +1,6 @@ import py, sys, random, os, struct, operator from pypy.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, LoopToken, @@ -39,7 +40,7 @@ else: raise NotImplementedError(box) res = self.cpu.execute_token(looptoken) - if res is operations[-1].descr: + if res is operations[-1].getdescr(): self.guard_failed = False else: self.guard_failed = True @@ -74,10 +75,11 @@ ResOperation(rop.FINISH, results, None, descr=BasicFailDescr(0))] if operations[0].is_guard(): - operations[0].fail_args = [] + operations[0].setfailargs([]) if not descr: descr = BasicFailDescr(1) - operations[0].descr = descr + if descr is not None: + operations[0].setdescr(descr) inputargs = [] for box in valueboxes: if isinstance(box, Box) 
and box not in inputargs: @@ -116,7 +118,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -137,7 +139,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, None, i1, None] + operations[2].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -160,7 +162,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -184,7 +186,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [i1] + operations[2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -194,7 +196,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -218,7 +220,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -228,7 +230,7 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] - bridge[1].fail_args = [i1b] + bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge) @@ -251,7 +253,7 @@ ResOperation(rop.JUMP, [i1], None, descr=looptoken), ] inputargs = [i0] - operations[2].fail_args = [None, i1, None] + operations[2].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -317,7 +319,7 @@ descr=BasicFailDescr()), ResOperation(rop.JUMP, [z, t], None, descr=looptoken), ] - operations[-2].fail_args = [t, z] + operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 10) @@ -363,7 +365,7 @@ ResOperation(rop.FINISH, [v_res], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [] + ops[1].setfailargs([]) else: v_exc = self.cpu.ts.BoxRef() ops = [ @@ -372,7 +374,7 @@ descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)), ] - ops[1].fail_args = [v_res] + ops[1].setfailargs([v_res]) # looptoken = LoopToken() self.cpu.compile_loop([v1, v2], ops, looptoken) @@ -814,6 +816,23 @@ r = self.execute_operation(rop.STRGETITEM, [s_box, BoxInt(4)], 'int') assert r.value == 153 + def test_copystrcontent(self): + s_box = self.alloc_string("abcdef") + for s_box in [s_box, s_box.constbox()]: + for srcstart_box in [BoxInt(2), ConstInt(2)]: + for dststart_box in [BoxInt(3), ConstInt(3)]: + for length_box in [BoxInt(4), ConstInt(4)]: + for r_box_is_const in [False, True]: + r_box = self.alloc_string("!???????!") + if r_box_is_const: + r_box = r_box.constbox() + self.execute_operation(rop.COPYSTRCONTENT, + [s_box, r_box, + srcstart_box, + dststart_box, + length_box], 'void') + assert self.look_string(r_box) == "!??cdef?!" 
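For reference, the behaviour that the new test_copystrcontent test above pins down can be sketched in plain Python; the helper below is only an illustrative model of the COPYSTRCONTENT semantics, not code from the backend:

    def copystrcontent_model(src, dst, srcstart, dststart, length):
        # copy 'length' characters out of 'src', starting at 'srcstart',
        # into 'dst', starting at 'dststart', and return the new string
        chars = list(dst)
        chars[dststart:dststart + length] = src[srcstart:srcstart + length]
        return ''.join(chars)

    # matches the expected result checked by the test above:
    assert copystrcontent_model("abcdef", "!???????!", 2, 3, 4) == "!??cdef?!"

The x86 backend realizes this with a memcpy() call, as the codebuf and regalloc changes further down in this commit show (memcpy_fn / consider_copystrcontent).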
+ def test_do_unicode_basic(self): u = self.cpu.bh_newunicode(5) self.cpu.bh_unicodesetitem(u, 4, 123) @@ -909,8 +928,8 @@ ResOperation(rop.GUARD_TRUE, [i2], None), ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), ] - operations[2].fail_args = inputargs[:] - operations[2].descr = faildescr + operations[2].setfailargs(inputargs[:]) + operations[2].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -975,7 +994,7 @@ ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] - operations[-2].fail_args = fboxes + operations[-2].setfailargs(fboxes) looptoken = LoopToken() self.cpu.compile_loop(fboxes, operations, looptoken) @@ -1098,7 +1117,7 @@ descr=BasicFailDescr(4)), ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] - operations[1].fail_args = [] + operations[1].setfailargs([]) looptoken = LoopToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) @@ -1197,6 +1216,10 @@ s_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, s)) return s_box + def look_string(self, string_box): + s = string_box.getref(lltype.Ptr(rstr.STR)) + return ''.join(s.chars) + def alloc_unicode(self, unicode): u = rstr.mallocunicode(len(unicode)) for i in range(len(unicode)): @@ -1404,15 +1427,15 @@ assert not excvalue def test_cond_call_gc_wb(self): - def func_void(a, b): - record.append((a, b)) + def func_void(a): + record.append(a) record = [] # S = lltype.GcStruct('S', ('tid', lltype.Signed)) - FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void) + FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr: + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 @@ -1430,10 +1453,10 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstInt(-2121)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: - assert record == [(s, -2121)] + assert record == [s] else: assert record == [] @@ -1462,7 +1485,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i0] + ops[2].setfailargs([i1, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1506,7 +1529,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, i2, i0] + ops[2].setfailargs([i1, i2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1551,7 +1574,7 @@ ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] - ops[2].fail_args = [i1, f2, i0] + ops[2].setfailargs([i1, f2, i0]) looptoken = LoopToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) @@ -1824,7 +1847,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr) + done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) looptoken = LoopToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() 
self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py Thu Sep 30 00:16:20 2010 @@ -464,7 +464,7 @@ self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 5. Non raising-call and GUARD_EXCEPTION @@ -486,7 +486,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) op._exc_box = None builder.should_fail_by = op builder.guard_op = op @@ -507,7 +507,7 @@ exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), descr=BasicFailDescr()) - op.fail_args = fail_subset + op.setfailargs(fail_subset) builder.loop.operations.append(op) # 4. raising call and guard_no_exception @@ -524,7 +524,7 @@ op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) @@ -548,7 +548,7 @@ op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(), descr=BasicFailDescr()) op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) - op.fail_args = builder.subset_of_intvars(r) + op.setfailargs(builder.subset_of_intvars(r)) builder.should_fail_by = op builder.guard_op = op builder.loop.operations.append(op) Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py Thu Sep 30 00:16:20 2010 @@ -86,7 +86,7 @@ def process_operation(self, s, op, names, subops): args = [] - for v in op.args: + for v in op.getarglist(): if v in names: args.append(names[v]) ## elif isinstance(v, ConstAddr): @@ -105,11 +105,11 @@ args.append('ConstInt(%d)' % v.value) else: raise NotImplementedError(v) - if op.descr is None: + if op.getdescr() is None: descrstr = '' else: try: - descrstr = ', ' + op.descr._random_info + descrstr = ', ' + op.getdescr()._random_info except AttributeError: descrstr = ', descr=...' 
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % ( @@ -129,7 +129,7 @@ def print_loop_prebuilt(ops): for op in ops: - for arg in op.args: + for arg in op.getarglist(): if isinstance(arg, ConstPtr): if arg not in names: writevar(arg, 'const_ptr') @@ -191,7 +191,7 @@ if self.should_fail_by is None: fail_args = self.loop.operations[-1].args else: - fail_args = self.should_fail_by.fail_args + fail_args = self.should_fail_by.getfailargs() for i, v in enumerate(fail_args): if isinstance(v, (BoxFloat, ConstFloat)): print >>s, (' assert cpu.get_latest_value_float(%d) == %r' @@ -284,8 +284,8 @@ builder.intvars[:] = original_intvars else: op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - op.descr = BasicFailDescr() - op.fail_args = fail_subset + op.setdescr(BasicFailDescr()) + op.setfailargs(fail_subset) builder.loop.operations.append(op) class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation): @@ -345,8 +345,8 @@ def produce_into(self, builder, r): op, passing = self.gen_guard(builder, r) builder.loop.operations.append(op) - op.descr = BasicFailDescr() - op.fail_args = builder.subset_of_intvars(r) + op.setdescr(BasicFailDescr()) + op.setfailargs(builder.subset_of_intvars(r)) if not passing: builder.should_fail_by = op builder.guard_op = op @@ -553,7 +553,7 @@ endvars = [] used_later = {} for op in loop.operations: - for v in op.args: + for v in op.getarglist(): used_later[v] = True for v in startvars: if v not in used_later: @@ -577,11 +577,11 @@ def get_fail_args(self): if self.should_fail_by.is_guard(): - assert self.should_fail_by.fail_args is not None - return self.should_fail_by.fail_args + assert self.should_fail_by.getfailargs() is not None + return self.should_fail_by.getfailargs() else: - assert self.should_fail_by.opnum == rop.FINISH - return self.should_fail_by.args + assert self.should_fail_by.getopnum() == rop.FINISH + return self.should_fail_by.getarglist() def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: @@ -606,7 +606,7 @@ else: raise NotImplementedError(box) fail = cpu.execute_token(self.loop.token) - assert fail is self.should_fail_by.descr + assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): value = cpu.get_latest_value_float(i) @@ -620,7 +620,7 @@ exc = cpu.grab_exc_value() if (self.guard_op is not None and self.guard_op.is_guard_exception()): - if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION: + if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: assert exc else: assert not exc @@ -633,26 +633,26 @@ else: op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box], BoxPtr()) - op.descr = BasicFailDescr() - op.fail_args = [] + op.setdescr(BasicFailDescr()) + op.setfailargs([]) return op if self.dont_generate_more: return False r = self.r guard_op = self.guard_op - fail_args = guard_op.fail_args - fail_descr = guard_op.descr + fail_args = guard_op.getfailargs() + fail_descr = guard_op.getdescr() op = self.should_fail_by - if not op.fail_args: + if not op.getfailargs(): return False # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) if guard_op.is_guard_exception(): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, - op.fail_args[:]) - self.generate_ops(bridge_builder, r, subloop, op.fail_args[:]) + op.getfailargs()[:]) + self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:]) # note that 'self.guard_op' now points to the guard that will fail in # this 
new bridge, while 'guard_op' still points to the guard that # has just failed. Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py Thu Sep 30 00:16:20 2010 @@ -181,6 +181,7 @@ self.malloc_fixedsize_slowpath1 = 0 self.malloc_fixedsize_slowpath2 = 0 self.pending_guard_tokens = None + self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') @@ -212,6 +213,7 @@ ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, ll_new_unicode) + self.memcpy_addr = self.cpu.cast_ptr_to_int(codebuf.memcpy_fn) self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -390,8 +392,8 @@ def _find_debug_merge_point(self, operations): for op in operations: - if op.opnum == rop.DEBUG_MERGE_POINT: - funcname = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + funcname = op.getarg(0)._get_str() break else: funcname = "" % len(self.loop_run_counters) @@ -419,7 +421,6 @@ mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) mc.JMP_r(X86_64_SCRATCH_REG.value) - mc.valgrind_invalidated() mc.done() def _inject_debugging_code(self, operations): @@ -475,7 +476,6 @@ # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP mc.writeimm32(-WORD * aligned_words) - mc.valgrind_invalidated() mc.done() def _call_header(self): @@ -598,7 +598,6 @@ target = newlooptoken._x86_direct_bootstrap_code mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16) mc.JMP(imm(target)) - mc.valgrind_invalidated() mc.done() def _assemble_bootstrap_code(self, inputargs, arglocs): @@ -684,25 +683,25 @@ self.mc.POP(loc) def regalloc_perform(self, op, arglocs, resloc): - genop_list[op.opnum](self, op, arglocs, resloc) + genop_list[op.getopnum()](self, op, arglocs, resloc) def regalloc_perform_discard(self, op, arglocs): - genop_discard_list[op.opnum](self, op, arglocs) + genop_discard_list[op.getopnum()](self, op, arglocs) def regalloc_perform_with_guard(self, op, guard_op, faillocs, arglocs, resloc, current_depths): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) faildescr._x86_current_depths = current_depths - failargs = guard_op.fail_args - guard_opnum = guard_op.opnum + failargs = guard_op.getfailargs() + guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, faildescr, failargs, faillocs) if op is None: dispatch_opnum = guard_opnum else: - dispatch_opnum = op.opnum + dispatch_opnum = op.getopnum() res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token, arglocs, resloc) faildescr._x86_adr_jump_offset = res @@ -712,8 +711,8 @@ self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, resloc, current_depths) - def load_effective_addr(self, sizereg, baseofs, scale, result): - self.mc.LEA(result, addr_add(imm(0), sizereg, baseofs, scale)) + def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm(0)): + self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) def _unaryop(asmop): def genop_unary(self, op, arglocs, resloc): @@ -728,7 +727,7 @@ def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): 
rl = result_loc.lowest8bits() - if isinstance(op.args[0], Const): + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value) else: @@ -758,8 +757,8 @@ def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond): def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum - if isinstance(op.args[0], Const): + guard_opnum = guard_op.getopnum() + if isinstance(op.getarg(0), Const): self.mc.CMP(arglocs[1], arglocs[0]) if guard_opnum == rop.GUARD_FALSE: return self.implement_guard(guard_token, rev_cond) @@ -776,7 +775,7 @@ def _cmpop_guard_float(cond, false_cond, need_jp): def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -945,7 +944,7 @@ genop_guard_float_ge = _cmpop_guard_float("AE", "B", False) def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.UCOMISD(arglocs[0], arglocs[1]) # 16 is enough space for the rel8 jumps below and the rel32 # jump in implement_guard @@ -973,7 +972,7 @@ self.mc.CVTSI2SD(resloc, arglocs[0]) def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'Z') @@ -987,7 +986,7 @@ self.mc.MOVZX8(resloc, rl) def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm(0)) if guard_opnum == rop.GUARD_TRUE: return self.implement_guard(guard_token, 'NZ') @@ -1031,7 +1030,7 @@ if self.cpu.vtable_offset is not None: assert isinstance(loc, RegLoc) assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value) + self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) @@ -1123,7 +1122,7 @@ assert isinstance(baseofs, ImmedLoc) assert isinstance(scale_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value) - if op.args[2].type == FLOAT: + if op.getarg(2).type == FLOAT: self.mc.MOVSD(dest_addr, value_loc) else: if IS_X86_64 and scale_loc.value == 3: @@ -1219,7 +1218,7 @@ return addr def _gen_guard_overflow(self, guard_op, guard_token): - guard_opnum = guard_op.opnum + guard_opnum = guard_op.getopnum() if guard_opnum == rop.GUARD_NO_OVERFLOW: return self.implement_guard(guard_token, 'O') elif guard_opnum == rop.GUARD_OVERFLOW: @@ -1247,8 +1246,8 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - if guard_op.args[0].type == FLOAT: - assert guard_op.args[1].type == FLOAT + if guard_op.getarg(0).type == FLOAT: + assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: self.mc.CMP(locs[0], locs[1]) @@ -1639,8 +1638,8 @@ assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value - if isinstance(op.args[0], Const): - x = imm(op.args[0].getint()) + if isinstance(op.getarg(0), Const): + x = imm(op.getarg(0).getint()) else: x = arglocs[1] if 
x is eax: @@ -1659,7 +1658,7 @@ def genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) self.genop_call(op, arglocs, result_loc) @@ -1668,10 +1667,10 @@ def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): - faildescr = guard_op.descr + faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # @@ -1756,7 +1755,7 @@ def genop_discard_cond_call_gc_wb(self, op, arglocs): # use 'mc._mc' directly instead of 'mc', to avoid # bad surprizes if the code buffer is mostly full - descr = op.descr + descr = op.getdescr() if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) @@ -1768,6 +1767,7 @@ jz_location = self.mc.get_relative_pos() # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. + # XXX improve a bit, particularly for IS_X86_64. for i in range(len(arglocs)-1, -1, -1): loc = arglocs[i] if isinstance(loc, RegLoc): @@ -1780,12 +1780,11 @@ self.mc.PUSH_i32(loc.getint()) if IS_X86_64: - # We clobber these registers to pass the arguments, but that's + # We clobber this register to pass the arguments, but that's # okay, because consider_cond_call_gc_wb makes sure that any # caller-save registers with values in them are present in arglocs, # so they are saved on the stack above and restored below self.mc.MOV_rs(edi.value, 0) - self.mc.MOV_rs(esi.value, 8) # misaligned stack in the call, but it's ok because the write barrier # is not going to call anything more. 
Also, this assumes that the @@ -1866,6 +1865,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, [chr(offset)]) + # on 64-bits, 'tid' is a value that fits in 31 bits self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py Thu Sep 30 00:16:20 2010 @@ -1,6 +1,6 @@ import os, sys -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder from pypy.jit.backend.x86.regloc import LocationCodeBuilder @@ -29,6 +29,9 @@ self._pos = 0 def overwrite(self, pos, listofchars): + """ Overwrite a specified position with a given list of chars + (position is relative + """ make_sure_not_resized(listofchars) assert pos + len(listofchars) <= self._size for c in listofchars: @@ -49,35 +52,38 @@ self.writechar(chr(n)) def get_relative_pos(self): + """ Current position, relative to code start + """ return self._pos def tell(self): + """ Tell the current address at machine code block + """ baseaddr = rffi.cast(lltype.Signed, self._data) return baseaddr + self._pos - def seekback(self, count): - pos = self._pos - count - self._pos = pos - self._last_dump_start = pos - def done(self): - # normally, no special action is needed here + """ Called at the end of writing of each piece of machine code. + Even though this function doesn't do much, it's extremely important + to call this for all tools to work, like valgrind or machine code + dumping + """ + self.valgrind_invalidated() if machine_code_dumper.enabled: machine_code_dumper.dump_range(self, self._last_dump_start, self._pos) self._last_dump_start = self._pos - def redone(self, frm, to): - if machine_code_dumper.enabled: - baseaddr = rffi.cast(lltype.Signed, self._data) - machine_code_dumper.dump_range(self, frm - baseaddr, to - baseaddr) - def log(self, msg): + """ Insert information into machine code dumper, if enabled + """ if machine_code_dumper.enabled: machine_code_dumper.dump(self, 'LOG', self._pos, msg) def valgrind_invalidated(self): - # mark the range of the InMemoryCodeBuilder as invalidated for Valgrind + """ Mark the range of the InMemoryCodeBuilder as invalidated + for Valgrind + """ from pypy.jit.backend.x86 import valgrind valgrind.discard_translations(self._data, self._size) @@ -146,7 +152,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes from pypy.rlib.rmmap import hint - hint.pos += 0xFFFFFFFF + hint.pos += 0x80000000 - map_size self._init(data, map_size) @@ -158,6 +164,12 @@ # ____________________________________________________________ +memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True, _nowrapper=True) + +# ____________________________________________________________ + if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py (original) +++ 
pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py Thu Sep 30 00:16:20 2010 @@ -224,7 +224,7 @@ assert tmpreg not in nonfloatlocs assert xmmtmp not in floatlocs # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op.args, which is a non-resizable list + # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) return nonfloatlocs, floatlocs @@ -234,6 +234,12 @@ else: self.rm.possibly_free_var(var) + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + var = op.getarg(i) + if var is not None: # xxx kludgy + self.possibly_free_var(var) + def possibly_free_vars(self, vars): for var in vars: if var is not None: # xxx kludgy @@ -262,12 +268,12 @@ selected_reg, need_lower_byte) def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.opnum != rop.JUMP or jump.descr is not looptoken: + if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: loop_consts = {} else: loop_consts = {} for i in range(len(inputargs)): - if inputargs[i] is jump.args[i]: + if inputargs[i] is jump.getarg(i): loop_consts[inputargs[i]] = i return loop_consts @@ -301,7 +307,7 @@ if reg not in used: self.xrm.free_regs.append(reg) # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op.args, which is a non-resizable list + # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) self.rm._check_invariants() self.xrm._check_invariants() @@ -312,7 +318,7 @@ self.assembler.regalloc_perform(op, arglocs, result_loc) def locs_for_fail(self, guard_op): - return [self.loc(v) for v in guard_op.fail_args] + return [self.loc(v) for v in guard_op.getfailargs()] def perform_with_guard(self, op, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -324,7 +330,7 @@ current_depths) if op.result is not None: self.possibly_free_var(op.result) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def perform_guard(self, guard_op, arglocs, result_loc): faillocs = self.locs_for_fail(guard_op) @@ -338,7 +344,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.fail_args) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -346,24 +352,24 @@ self.assembler.regalloc_perform_discard(op, arglocs) def can_merge_with_next_guard(self, op, i, operations): - if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER: - assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED + if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: + assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if not op.is_comparison(): if op.is_ovf(): - if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and - operations[i + 1].opnum != rop.GUARD_OVERFLOW): + if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and + operations[i + 1].getopnum() != rop.GUARD_OVERFLOW): print "int_xxx_ovf not followed by guard_(no)_overflow" raise AssertionError return True return False - if (operations[i + 1].opnum != rop.GUARD_TRUE and - operations[i + 1].opnum != rop.GUARD_FALSE): + if (operations[i + 1].getopnum() != rop.GUARD_TRUE and + operations[i + 1].getopnum() != rop.GUARD_FALSE): return False - if operations[i + 1].args[0] is not op.result: + if operations[i + 1].getarg(0) is not op.result: return False if 
(self.longevity[op.result][1] > i + 1 or - op.result in operations[i + 1].fail_args): + op.result in operations[i + 1].getfailargs()): return False return True @@ -376,13 +382,13 @@ self.xrm.position = i if op.has_no_side_effect() and op.result not in self.longevity: i += 1 - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) continue if self.can_merge_with_next_guard(op, i, operations): - oplist_with_guard[op.opnum](self, op, operations[i + 1]) + oplist_with_guard[op.getopnum()](self, op, operations[i + 1]) i += 1 else: - oplist[op.opnum](self, op) + oplist[op.getopnum()](self, op) if op.result is not None: self.possibly_free_var(op.result) self.rm._check_invariants() @@ -402,19 +408,20 @@ op = operations[i] if op.result is not None: start_live[op.result] = i - for arg in op.args: + for j in range(op.numargs()): + arg = op.getarg(j) if isinstance(arg, Box): if arg not in start_live: - print "Bogus arg in operation %d at %d" % (op.opnum, i) + print "Bogus arg in operation %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) if op.is_guard(): - for arg in op.fail_args: + for arg in op.getfailargs(): if arg is None: # hole continue assert isinstance(arg, Box) if arg not in start_live: - print "Bogus arg in guard %d at %d" % (op.opnum, i) + print "Bogus arg in guard %d at %d" % (op.getopnum(), i) raise AssertionError longevity[arg] = (start_live[arg], i) for arg in inputargs: @@ -432,9 +439,9 @@ return self.rm.loc(v) def _consider_guard(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) self.perform_guard(op, [loc], None) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) consider_guard_true = _consider_guard consider_guard_false = _consider_guard @@ -442,52 +449,54 @@ consider_guard_isnull = _consider_guard def consider_finish(self, op): - locs = [self.loc(v) for v in op.args] - locs_are_ref = [v.type == REF for v in op.args] - fail_index = self.assembler.cpu.get_fail_descr_number(op.descr) + locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] + locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())] + fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr()) self.assembler.generate_failure(fail_index, locs, self.exc, locs_are_ref) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) def consider_guard_exception(self, op): - loc = self.rm.make_sure_var_in_reg(op.args[0]) + loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() - loc1 = self.rm.force_allocate_reg(box, op.args) + args = op.getarglist() + loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used - resloc = self.rm.force_allocate_reg(op.result, op.args + [box]) + resloc = self.rm.force_allocate_reg(op.result, args + [box]) else: resloc = None self.perform_guard(op, [loc, loc1], resloc) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(box) consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception def consider_guard_value(self, op): - x = self.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + x = self.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) def 
consider_guard_class(self, op): - assert isinstance(op.args[0], Box) - x = self.rm.make_sure_var_in_reg(op.args[0]) - y = self.loc(op.args[1]) + assert isinstance(op.getarg(0), Box) + x = self.rm.make_sure_var_in_reg(op.getarg(0)) + y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_guard_nonnull_class = consider_guard_class def _consider_binop_part(self, op): - x = op.args[0] - argloc = self.loc(op.args[1]) - loc = self.rm.force_result_in_reg(op.result, x, op.args) - self.rm.possibly_free_var(op.args[1]) + x = op.getarg(0) + argloc = self.loc(op.getarg(1)) + args = op.getarglist() + loc = self.rm.force_result_in_reg(op.result, x, args) + self.rm.possibly_free_var(op.getarg(1)) return loc, argloc def _consider_binop(self, op): @@ -510,26 +519,27 @@ consider_int_add_ovf = _consider_binop_with_guard def consider_int_neg(self, op): - res = self.rm.force_result_in_reg(op.result, op.args[0]) + res = self.rm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [res], res) consider_int_invert = consider_int_neg def consider_int_lshift(self, op): - if isinstance(op.args[1], Const): - loc2 = self.rm.convert_to_imm(op.args[1]) + if isinstance(op.getarg(1), Const): + loc2 = self.rm.convert_to_imm(op.getarg(1)) else: - loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) - loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args) + loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) + args = op.getarglist() + loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args) self.Perform(op, [loc1, loc2], loc1) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) consider_int_rshift = consider_int_lshift consider_uint_rshift = consider_int_lshift def _consider_int_div_or_mod(self, op, resultreg, trashreg): - l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax) - l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx) + l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax) + l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx) l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg) # the register (eax or edx) not holding what we are looking for # will be just trash after that operation @@ -538,7 +548,7 @@ assert l0 is eax assert l1 is ecx assert l2 is resultreg - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.rm.possibly_free_var(tmpvar) def consider_int_mod(self, op): @@ -552,17 +562,18 @@ consider_uint_floordiv = consider_int_floordiv def _consider_compop(self, op, guard_op): - vx = op.args[0] - vy = op.args[1] + vx = op.getarg(0) + vy = op.getarg(1) arglocs = [self.loc(vx), self.loc(vy)] if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or isinstance(vx, Const) or isinstance(vy, Const)): pass else: arglocs[0] = self.rm.make_sure_var_in_reg(vx) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + self.rm.possibly_free_vars(args) if guard_op is None: - loc = self.rm.force_allocate_reg(op.result, op.args, + loc = self.rm.force_allocate_reg(op.result, args, need_lower_byte=True) self.Perform(op, arglocs, loc) else: @@ -582,10 +593,11 @@ consider_ptr_ne = _consider_compop def _consider_float_op(self, op): - loc1 = self.xrm.loc(op.args[1]) - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args) + loc1 = self.xrm.loc(op.getarg(1)) + args = op.getarglist() + loc0 = self.xrm.force_result_in_reg(op.result, 
op.getarg(0), args) self.Perform(op, [loc0, loc1], loc0) - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) consider_float_add = _consider_float_op consider_float_sub = _consider_float_op @@ -593,11 +605,12 @@ consider_float_truediv = _consider_float_op def _consider_float_cmp(self, op, guard_op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args, + args = op.getarglist() + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) - loc1 = self.xrm.loc(op.args[1]) + loc1 = self.xrm.loc(op.getarg(1)) arglocs = [loc0, loc1] - self.xrm.possibly_free_vars(op.args) + self.xrm.possibly_free_vars_for_op(op) if guard_op is None: res = self.rm.force_allocate_reg(op.result, need_lower_byte=True) self.Perform(op, arglocs, res) @@ -612,26 +625,26 @@ consider_float_ge = _consider_float_cmp def consider_float_neg(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_float_abs(self, op): - loc0 = self.xrm.force_result_in_reg(op.result, op.args[0]) + loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0)) self.Perform(op, [loc0], loc0) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_float_to_int(self, op): - loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False) + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False) loc1 = self.rm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.xrm.possibly_free_var(op.args[0]) + self.xrm.possibly_free_var(op.getarg(0)) def consider_cast_int_to_float(self, op): - loc0 = self.rm.loc(op.args[0]) + loc0 = self.rm.loc(op.getarg(0)) loc1 = self.xrm.force_allocate_reg(op.result) self.Perform(op, [loc0], loc1) - self.rm.possibly_free_var(op.args[0]) + self.rm.possibly_free_var(op.getarg(0)) def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None @@ -650,11 +663,11 @@ self.Perform(op, arglocs, resloc) def _consider_call(self, op, guard_not_forced_op=None): - calldescr = op.descr + calldescr = op.getdescr() assert isinstance(calldescr, BaseCallDescr) - assert len(calldescr.arg_classes) == len(op.args) - 1 + assert len(calldescr.arg_classes) == op.numargs() - 1 size = calldescr.get_result_size(self.translate_support_code) - self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args], + self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_not_forced_op) def consider_call(self, op): @@ -665,30 +678,27 @@ self._consider_call(op, guard_op) def consider_call_assembler(self, op, guard_op): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: - self.rm._sync_var(op.args[vable_index]) - vable = self.fm.loc(op.args[vable_index]) + self.rm._sync_var(op.getarg(vable_index)) + vable = self.fm.loc(op.getarg(vable_index)) else: vable = imm(0) self._call(op, [imm(size), vable] + - [self.loc(arg) for arg in op.args], + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) def consider_cond_call_gc_wb(self, op): assert op.result is None - loc_newvalue = 
self.rm.make_sure_var_in_reg(op.args[1], op.args) - # ^^^ we force loc_newvalue in a reg (unless it's a Const), - # because it will be needed anyway by the following setfield_gc. - # It avoids loading it twice from the memory. - loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args, + args = op.getarglist() + loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args, imm_fine=False) - arglocs = [loc_base, loc_newvalue] + arglocs = [loc_base] # add eax, ecx and edx as extra "arguments" to ensure they are # saved and restored. Fish in self.rm to know which of these # registers really need to be saved (a bit of a hack). Moreover, @@ -700,7 +710,7 @@ and self.rm.stays_alive(v)): arglocs.append(reg) self.PerformDiscard(op, arglocs) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) def _fastpath_malloc(self, op, descr): assert isinstance(descr, BaseSizeDescr) @@ -725,15 +735,15 @@ def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.descr): - self._fastpath_malloc(op, op.descr) + if gc_ll_descr.can_inline_malloc(op.getdescr()): + self._fastpath_malloc(op, op.getdescr()) else: - args = gc_ll_descr.args_for_new(op.descr) + args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] return self._call(op, arglocs) def consider_new_with_vtable(self, op): - classint = op.args[0].getint() + classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): self._fastpath_malloc(op, descrsize) @@ -742,34 +752,34 @@ else: args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) def consider_newstr(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newstr is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code) assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, 0, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0), op.result) def consider_newunicode(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newunicode is not None: # framework GC - loc = self.loc(op.args[0]) + loc = self.loc(op.getarg(0)) return self._call(op, [loc]) # boehm GC (XXX kill the following code at some point) ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) if itemsize == 4: - return self._malloc_varsize(ofs_items, ofs, 2, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0), op.result) elif itemsize == 2: - return self._malloc_varsize(ofs_items, ofs, 1, op.args[0], + return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0), op.result) else: assert False, itemsize @@ -784,7 +794,7 @@ else: tempbox = None other_loc = imm(ofs_items + (v.getint() << scale)) - self._call(ResOperation(rop.NEW, [v], res_v), + self._call(ResOperation(rop.NEW, [], res_v), [other_loc], [v]) loc = self.rm.make_sure_var_in_reg(v, [res_v]) assert self.loc(res_v) == eax @@ -792,22 +802,22 @@ self.rm.possibly_free_var(v) if tempbox is not None: self.rm.possibly_free_var(tempbox) - self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], 
None), + self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None), [eax, imm(ofs_length), imm(WORD), loc]) def consider_new_array(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr) + args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.args[0])) + arglocs.append(self.loc(op.getarg(0))) return self._call(op, arglocs) # boehm GC (XXX kill the following code at some point) scale_of_field, basesize, ofs_length, _ = ( - self._unpack_arraydescr(op.descr)) + self._unpack_arraydescr(op.getdescr())) return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.args[0], op.result) + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -829,50 +839,54 @@ return imm(ofs), imm(size), ptr def consider_setfield_gc(self, op): - ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr) + ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True else: need_lower_byte = False - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - value_loc = self.make_sure_var_in_reg(op.args[1], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + value_loc = self.make_sure_var_in_reg(op.getarg(1), args, need_lower_byte=need_lower_byte) - self.possibly_free_vars(op.args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc]) consider_setfield_raw = consider_setfield_gc def consider_strsetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args, + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=True) - self.rm.possibly_free_vars(op.args) + self.rm.possibly_free_vars_for_op(op) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc]) consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - scale, ofs, _, ptr = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) + scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if scale == 0: need_lower_byte = True else: need_lower_byte = False - value_loc = self.make_sure_var_in_reg(op.args[2], op.args, + value_loc = self.make_sure_var_in_reg(op.getarg(2), args, need_lower_byte=need_lower_byte) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.possibly_free_vars(op.args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.possibly_free_vars(args) self.PerformDiscard(op, [base_loc, ofs_loc, value_loc, imm(scale), imm(ofs)]) consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) + args = op.getarglist() + base_loc = 
self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars(args) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc) @@ -881,10 +895,11 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - scale, ofs, _, _ = self._unpack_arraydescr(op.descr) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr()) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc) @@ -893,8 +908,8 @@ def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register - argloc = self.loc(op.args[0]) - self.rm.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.rm.possibly_free_var(op.getarg(0)) if guard_op is not None: self.perform_with_guard(op, guard_op, [argloc], None) else: @@ -904,42 +919,81 @@ consider_int_is_zero = consider_int_is_true def consider_same_as(self, op): - argloc = self.loc(op.args[0]) - self.possibly_free_var(op.args[0]) + argloc = self.loc(op.getarg(0)) + self.possibly_free_var(op.getarg(0)) resloc = self.force_allocate_reg(op.result) self.Perform(op, [argloc], resloc) #consider_cast_ptr_to_int = consider_same_as def consider_strlen(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc], result_loc) consider_unicodelen = consider_strlen def consider_arraylen_gc(self, op): - arraydescr = op.descr + arraydescr = op.getdescr() assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_ofs_length(self.translate_support_code) - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, imm(ofs)], result_loc) def consider_strgetitem(self, op): - base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args) - ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args) - self.rm.possibly_free_vars(op.args) + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + self.rm.possibly_free_vars_for_op(op) result_loc = self.rm.force_allocate_reg(op.result) self.Perform(op, [base_loc, ofs_loc], result_loc) consider_unicodegetitem = consider_strgetitem + def consider_copystrcontent(self, op): + # compute the source address + args = op.getarglist() + base_loc = self.rm.make_sure_var_in_reg(args[0], args) + ofs_loc = self.rm.make_sure_var_in_reg(args[2], args) + self.rm.possibly_free_var(args[0]) + self.rm.possibly_free_var(args[2]) + srcaddr_box = TempBox() + srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc) + # compute the destination address + base_loc = 
self.rm.make_sure_var_in_reg(args[1], args) + ofs_loc = self.rm.make_sure_var_in_reg(args[3], args) + self.rm.possibly_free_var(args[1]) + self.rm.possibly_free_var(args[3]) + dstaddr_box = TempBox() + dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box) + self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) + # call memcpy() + length_loc = self.loc(args[4]) + self.rm.before_call() + self.xrm.before_call() + self.assembler._emit_call(imm(self.assembler.memcpy_addr), + [dstaddr_loc, srcaddr_loc, length_loc]) + self.rm.possibly_free_var(args[4]) + self.rm.possibly_free_var(dstaddr_box) + self.rm.possibly_free_var(srcaddr_box) + + def _gen_address_inside_string(self, baseloc, ofsloc, resloc): + cpu = self.assembler.cpu + ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR, + self.translate_support_code) + assert itemsize == 1 + self.assembler.load_effective_addr(ofsloc, ofs_items, 0, + resloc, baseloc) + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None - descr = op.descr + descr = op.getdescr() assert isinstance(descr, LoopToken) self.jump_target_descr = descr nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) @@ -951,17 +1005,20 @@ xmmtmp = X86XMMRegisterManager.all_regs[0] xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT] + # XXX we don't need a copy, we only just the original list + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] assert tmploc not in nonfloatlocs dst_locations = [loc for loc in nonfloatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, tmploc) # Part about floats - src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT] + src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type == FLOAT] dst_locations = [loc for loc in floatlocs if loc is not None] remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp) self.rm.possibly_free_var(box) self.xrm.possibly_free_var(box1) - self.possibly_free_vars(op.args) + self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) def consider_debug_merge_point(self, op): @@ -1002,12 +1059,21 @@ def add_none_argument(fn): return lambda self, op: fn(self, op, None) +def is_comparison_or_ovf_op(opnum): + from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp + cls = opclasses[opnum] + # hack hack: in theory they are instance method, but they don't use + # any instance field, we can use a fake object + class Fake(cls): + pass + op = Fake(None) + return op.is_comparison() or op.is_ovf() + for name, value in RegAlloc.__dict__.iteritems(): if name.startswith('consider_'): name = name[len('consider_'):] num = getattr(rop, name.upper()) - if (ResOperation(num, [], None).is_comparison() - or ResOperation(num, [], None).is_ovf() + if (is_comparison_or_ovf_op(num) or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py Thu Sep 30 00:16:20 2010 @@ -33,6 +33,8 @@ def value_a(self): raise AssertionError("value_a undefined") def 
value_m(self): raise AssertionError("value_m undefined") + def find_unused_reg(self): return eax + class StackLoc(AssemblerLocation): _immutable_ = True def __init__(self, position, ebp_offset, num_words, type): @@ -88,6 +90,12 @@ def assembler(self): return '%' + repr(self) + def find_unused_reg(self): + if self.value == eax.value: + return edx + else: + return eax + class ImmedLoc(AssemblerLocation): _immutable_ = True width = WORD @@ -137,6 +145,12 @@ self._location_code = 'a' self.loc_a = (base_loc.value, scaled_loc.value, scale, static_offset) + def __repr__(self): + dict = {'j': 'value', 'a': 'loc_a', 'm': 'loc_m', 'a':'loc_a'} + attr = dict.get(self._location_code, '?') + info = getattr(self, attr, '?') + return '' % (self._location_code, info) + def location_code(self): return self._location_code @@ -146,6 +160,21 @@ def value_m(self): return self.loc_m + def find_unused_reg(self): + if self._location_code == 'm': + if self.loc_m[0] == eax.value: + return edx + elif self._location_code == 'a': + if self.loc_a[0] == eax.value: + if self.loc_a[1] == edx.value: + return ecx + return edx + if self.loc_a[1] == eax.value: + if self.loc_a[0] == edx.value: + return ecx + return edx + return eax + class ConstFloatLoc(AssemblerLocation): # XXX: We have to use this class instead of just AddressLoc because # AddressLoc is "untyped" and also we to have need some sort of unique @@ -159,6 +188,9 @@ self.value = address self.const_id = const_id + def __repr__(self): + return '' % (self.value, self.const_id) + def _getregkey(self): # XXX: 1000 is kind of magic: We just don't want to be confused # with any registers @@ -206,6 +238,32 @@ _scratch_register_value = 0 def _binaryop(name): + + def insn_with_64_bit_immediate(self, loc1, loc2): + # These are the worst cases: + val2 = loc2.value_i() + code1 = loc1.location_code() + if (code1 == 'j' + or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) + or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai + # and the constant offset in the address is 64-bit. + # Hopefully this doesn't happen too often + freereg = loc1.find_unused_reg() + self.PUSH_r(freereg.value) + self.MOV_ri(freereg.value, val2) + INSN(self, loc1, freereg) + self.POP_r(freereg.value) + else: + # For this case, we should not need the scratch register more than here. 
+ self._load_scratch(val2) + INSN(self, loc1, X86_64_SCRATCH_REG) + + def invoke(self, codes, val1, val2): + methname = name + "_" + codes + _rx86_getattr(self, methname)(val1, val2) + invoke._annspecialcase_ = 'specialize:arg(1)' + def INSN(self, loc1, loc2): code1 = loc1.location_code() code2 = loc2.location_code() @@ -218,38 +276,39 @@ if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"): assert code2 not in ('j', 'i') - for possible_code1 in unrolling_location_codes: - if code1 == possible_code1: - for possible_code2 in unrolling_location_codes: - if code2 == possible_code2: + for possible_code2 in unrolling_location_codes: + if code2 == possible_code2: + val2 = getattr(loc2, "value_" + possible_code2)() + # + # Fake out certain operations for x86_64 + if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2): + insn_with_64_bit_immediate(self, loc1, loc2) + return + # + # Regular case + for possible_code1 in unrolling_location_codes: + if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() - val2 = getattr(loc2, "value_" + possible_code2)() - # Fake out certain operations for x86_64 - if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2): - if possible_code1 == 'j': - # This is the worst case: INSN_ji, and both operands are 64-bit - # Hopefully this doesn't happen too often - self.PUSH_r(eax.value) - self.MOV_ri(eax.value, val1) - self.MOV_ri(X86_64_SCRATCH_REG.value, val2) - methname = name + "_mr" - _rx86_getattr(self, methname)((eax.value, 0), X86_64_SCRATCH_REG.value) - self.POP_r(eax.value) - else: - self.MOV_ri(X86_64_SCRATCH_REG.value, val2) - methname = name + "_" + possible_code1 + "r" - _rx86_getattr(self, methname)(val1, X86_64_SCRATCH_REG.value) - elif self.WORD == 8 and possible_code1 == 'j': - reg_offset = self._addr_as_reg_offset(val1) - methname = name + "_" + "m" + possible_code2 - _rx86_getattr(self, methname)(reg_offset, val2) + # More faking out of certain operations for x86_64 + if self.WORD == 8 and possible_code1 == 'j': + val1 = self._addr_as_reg_offset(val1) + invoke(self, "m" + possible_code2, val1, val2) elif self.WORD == 8 and possible_code2 == 'j': - reg_offset = self._addr_as_reg_offset(val2) - methname = name + "_" + possible_code1 + "m" - _rx86_getattr(self, methname)(val1, reg_offset) + val2 = self._addr_as_reg_offset(val2) + invoke(self, possible_code1 + "m", val1, val2) + elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): + val1 = self._fix_static_offset_64_m(val1) + invoke(self, "a" + possible_code2, val1, val2) + elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): + val2 = self._fix_static_offset_64_m(val2) + invoke(self, possible_code1 + "a", val1, val2) else: - methname = name + "_" + possible_code1 + possible_code2 - _rx86_getattr(self, methname)(val1, val2) + if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]): + val1 = self._fix_static_offset_64_a(val1) + if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): + val2 = self._fix_static_offset_64_a(val2) + invoke(self, possible_code1 + possible_code2, val1, val2) + return return func_with_new_name(INSN, "INSN_" + name) @@ -260,7 +319,7 @@ if code == possible_code: val = getattr(loc, "value_" + possible_code)() if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val): - self.MOV_ri(X86_64_SCRATCH_REG.value, val) + self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) else: methname = name + "_" + possible_code @@ -280,7 +339,7 @@ 
_rx86_getattr(self, name + "_l")(val) else: assert self.WORD == 8 - self.MOV_ri(X86_64_SCRATCH_REG.value, val) + self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) else: methname = name + "_" + possible_code @@ -317,6 +376,40 @@ self.MOV_ri(X86_64_SCRATCH_REG.value, addr) return (X86_64_SCRATCH_REG.value, 0) + def _fix_static_offset_64_m(self, (basereg, static_offset)): + # For cases where an AddressLoc has the location_code 'm', but + # where the static offset does not fit in 32-bits. We have to fall + # back to the X86_64_SCRATCH_REG. Note that this returns a location + # encoded as mode 'a'. These are all possibly rare cases; don't try + # to reuse a past value of the scratch register at all. + self._scratch_register_known = False + self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) + return (basereg, X86_64_SCRATCH_REG.value, 0, 0) + + def _fix_static_offset_64_a(self, (basereg, scalereg, + scale, static_offset)): + # For cases where an AddressLoc has the location_code 'a', but + # where the static offset does not fit in 32-bits. We have to fall + # back to the X86_64_SCRATCH_REG. In one case it is even more + # annoying. These are all possibly rare cases; don't try to reuse a + # past value of the scratch register at all. + self._scratch_register_known = False + self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) + # + if basereg != rx86.NO_BASE_REGISTER: + self.LEA_ra(X86_64_SCRATCH_REG.value, + (basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (X86_64_SCRATCH_REG.value, scalereg, scale, 0) + + def _load_scratch(self, value): + if (self._scratch_register_known + and value == self._scratch_register_value): + return + if self._reuse_scratch_register: + self._scratch_register_known = True + self._scratch_register_value = value + self.MOV_ri(X86_64_SCRATCH_REG.value, value) + def begin_reuse_scratch_register(self): # Flag the beginning of a block where it is okay to reuse the value # of the scratch register. 
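The point of begin_reuse_scratch_register() together with _load_scratch() above is that, inside a flagged block, loading the same 64-bit constant into X86_64_SCRATCH_REG twice should emit only one MOV. A toy version of that caching, with a list of recorded moves standing in for emitted machine code (the class and method names below are made up for illustration, not the real assembler API):

    class ScratchCache:
        def __init__(self):
            self.moves = []            # stands for the emitted MOV_ri's
            self._reuse = False
            self._known = False
            self._value = 0

        def begin_reuse(self):
            self._reuse = True
            self._known = False

        def end_reuse(self):
            self._reuse = False
            self._known = False

        def load_scratch(self, value):
            if self._known and value == self._value:
                return                 # the value is already in the register
            if self._reuse:
                self._known = True
                self._value = value
            self.moves.append(value)   # MOV_ri(X86_64_SCRATCH_REG, value)

    c = ScratchCache()
    c.begin_reuse()
    c.load_scratch(1 << 40)
    c.load_scratch(1 << 40)
    assert len(c.moves) == 1           # the second load was elided
    c.end_reuse()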
In theory we shouldn't have to do this if Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py Thu Sep 30 00:16:20 2010 @@ -506,6 +506,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py Thu Sep 30 00:16:20 2010 @@ -5,3 +5,6 @@ def pytest_runtest_setup(item): if cpu not in ('x86', 'x86_64'): py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) + if cpu == 'x86_64': + from pypy.rpython.lltypesystem import ll2ctypes + ll2ctypes.do_allocation_in_far_regions() Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py Thu Sep 30 00:16:20 2010 @@ -47,7 +47,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].descr + descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? @@ -114,8 +114,8 @@ assert loop.token._x86_param_depth == 0 # XXX: Maybe add enough ops to force stack on 64-bit as well? 
if IS_X86_32: - assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth - assert guard_op.descr._x86_bridge_param_depth == 0 + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py Thu Sep 30 00:16:20 2010 @@ -9,7 +9,7 @@ from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\ - FloatConstants + FloatConstants, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from pypy.jit.metainterp.test.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -17,6 +17,11 @@ from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.backend.x86.rx86 import * +def test_is_comparison_or_ovf_op(): + assert not is_comparison_or_ovf_op(rop.INT_ADD) + assert is_comparison_or_ovf_op(rop.INT_ADD_OVF) + assert is_comparison_or_ovf_op(rop.INT_EQ) + CPU = getcpuclass() class MockGcDescr(GcCache): def get_funcptr_for_new(self): @@ -159,8 +164,8 @@ assert guard_op.is_guard() bridge = self.parse(ops, **kwds) assert ([box.type for box in bridge.inputargs] == - [box.type for box in guard_op.fail_args]) - faildescr = guard_op.descr + [box.type for box in guard_op.getfailargs()]) + faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations) return bridge @@ -607,7 +612,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) @@ -630,7 +635,7 @@ ''' bridge = self.attach_bridge(ops, loop, -2) - assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2) + assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py (original) +++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py Thu Sep 30 00:16:20 2010 @@ -58,23 +58,250 @@ expected_ofs = pos_addr - (neg_addr+5) assert s.getvalue() == '\xE9' + struct.pack("= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE def effectinfo_from_writeanalyze(effects, cpu, - extraeffect=EffectInfo.EF_CAN_RAISE): + extraeffect=EffectInfo.EF_CAN_RAISE, + oopspecindex=EffectInfo.OS_NONE): from pypy.translator.backendopt.writeanalyze import top_set if effects is top_set: return None @@ -73,7 +94,8 @@ return EffectInfo(readonly_descrs_fields, write_descrs_fields, write_descrs_arrays, - extraeffect) + extraeffect, + oopspecindex) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: @@ -104,3 +126,33 @@ def analyze_simple_operation(self, 
op): return op.opname in ('jit_force_virtualizable', 'jit_force_virtual') + +# ____________________________________________________________ + +_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)} + +def callinfo_for_oopspec(oopspecindex): + """A function that returns the calldescr and the function + address (as an int) of one of the OS_XYZ functions defined above. + Don't use this if there might be several implementations of the same + OS_XYZ specialized by type, e.g. OS_ARRAYCOPY.""" + try: + return _callinfo_for_oopspec[oopspecindex] + except KeyError: + return (None, 0) + + +def _funcptr_for_oopspec_memo(oopspecindex): + from pypy.jit.codewriter import heaptracker + _, func_as_int = callinfo_for_oopspec(oopspecindex) + funcadr = heaptracker.int2adr(func_as_int) + return funcadr.ptr +_funcptr_for_oopspec_memo._annspecialcase_ = 'specialize:memo' + +def funcptr_for_oopspec(oopspecindex): + """A memo function that returns a pointer to the function described + by OS_XYZ (as a real low-level function pointer).""" + funcptr = _funcptr_for_oopspec_memo(oopspecindex) + assert funcptr + return funcptr +funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(0)' Modified: pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py Thu Sep 30 00:16:20 2010 @@ -1,16 +1,18 @@ import py, sys -from pypy.rpython.lltypesystem import lltype, rstr, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass from pypy.rpython import rlist from pypy.jit.metainterp.history import getkind from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.objspace.flow.model import Block, Link, c_last_exception from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo, _callinfo_for_oopspec from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj +from pypy.translator.unsimplify import varoftype def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): @@ -248,11 +250,13 @@ kind = self.callcontrol.guess_call_kind(op) return getattr(self, 'handle_%s_indirect_call' % kind)(op) - def rewrite_call(self, op, namebase, initialargs): + def rewrite_call(self, op, namebase, initialargs, args=None): """Turn 'i0 = direct_call(fn, i1, i2, ref1, ref2)' into 'i0 = xxx_call_ir_i(fn, descr, [i1,i2], [ref1,ref2])'. 
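The _callinfo_for_oopspec dictionary introduced in effectinfo.py earlier in this commit is essentially a registry: for each OS_XYZ index the codewriter records which call descriptor and which function implement the operation, so later phases can re-emit a residual call to it. Reduced to its bare shape (the index value and the descriptor string below are made up; the real code stores a heaptracker-encoded function address):

    OS_STR_CONCAT = 22                      # illustrative value only

    _callinfo_registry = {}                 # {oopspecindex: (calldescr, func)}

    def register_oopspec(index, calldescr, func):
        _callinfo_registry[index] = (calldescr, func)

    def callinfo_for_oopspec(index):
        # mirror the behaviour described above: unknown index -> (None, 0)
        return _callinfo_registry.get(index, (None, 0))

    register_oopspec(OS_STR_CONCAT, 'calldescr-strconcat', 0x1234)
    assert callinfo_for_oopspec(OS_STR_CONCAT) == ('calldescr-strconcat', 0x1234)
    assert callinfo_for_oopspec(999) == (None, 0)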
The name is one of '{residual,direct}_call_{r,ir,irf}_{i,r,f,v}'.""" - lst_i, lst_r, lst_f = self.make_three_lists(op.args[1:]) + if args is None: + args = op.args[1:] + lst_i, lst_r, lst_f = self.make_three_lists(args) reskind = getkind(op.result.concretetype)[0] if lst_f or reskind == 'f': kinds = 'irf' elif lst_i: kinds = 'ir' @@ -310,6 +314,8 @@ # dispatch to various implementations depending on the oopspec_name if oopspec_name.startswith('list.') or oopspec_name == 'newlist': prepare = self._handle_list_call + elif oopspec_name.startswith('stroruni.'): + prepare = self._handle_stroruni_call elif oopspec_name.startswith('virtual_ref'): prepare = self._handle_virtual_ref_call else: @@ -982,10 +988,7 @@ return extraop + [op] def do_fixed_list_ll_arraycopy(self, op, args, arraydescr): - calldescr = self.callcontrol.getcalldescr(op) - return SpaceOperation('arraycopy', - [calldescr, op.args[0]] + args + [arraydescr], - op.result) + return self._handle_oopspec_call(op, args, EffectInfo.OS_ARRAYCOPY) # ---------- resizable lists ---------- @@ -1023,6 +1026,92 @@ [args[0], lengthdescr], op.result) # ---------- + # Strings and Unicodes. + + def _handle_oopspec_call(self, op, args, oopspecindex): + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + if isinstance(op.args[0].value, str): + pass # for tests only + else: + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(op.args[0].value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func + op1 = self.rewrite_call(op, 'residual_call', + [op.args[0], calldescr], + args=args) + if self.callcontrol.calldescr_canraise(calldescr): + op1 = [op1, SpaceOperation('-live-', [], None)] + return op1 + + def _register_extra_helper(self, oopspecindex, oopspec_name, + argtypes, resulttype): + # a bit hackish + if oopspecindex in _callinfo_for_oopspec: + return + c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper, + oopspec_name, argtypes, + resulttype) + op = SpaceOperation('pseudo_call', + [c_func] + [varoftype(T) for T in argtypes], + varoftype(resulttype)) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex) + func = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(c_func.value)) + _callinfo_for_oopspec[oopspecindex] = calldescr, func + + def _handle_stroruni_call(self, op, oopspec_name, args): + if args[0].concretetype.TO == rstr.STR: + dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, + "stroruni.slice": EffectInfo.OS_STR_SLICE, + "stroruni.equal": EffectInfo.OS_STR_EQUAL, + } + elif args[0].concretetype.TO == rstr.UNICODE: + dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, + "stroruni.slice": EffectInfo.OS_UNI_SLICE, + "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + } + else: + assert 0, "args[0].concretetype must be STR or UNICODE" + # + if oopspec_name == "stroruni.equal": + SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE) + for otherindex, othername, argtypes, resulttype in [ + + (EffectInfo.OS_STREQ_SLICE_CHECKNULL, + "str.eq_slice_checknull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_NONNULL, + "str.eq_slice_nonnull", + [SoU, lltype.Signed, lltype.Signed, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_SLICE_CHAR, + "str.eq_slice_char", + [SoU, lltype.Signed, lltype.Signed, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL, + "str.eq_nonnull", + [SoU, SoU], + lltype.Signed), + (EffectInfo.OS_STREQ_NONNULL_CHAR, + "str.eq_nonnull_char", + [SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_CHECKNULL_CHAR, + "str.eq_checknull_char", + 
[SoU, lltype.Char], + lltype.Signed), + (EffectInfo.OS_STREQ_LENGTHOK, + "str.eq_lengthok", + [SoU, SoU], + lltype.Signed), + ]: + self._register_extra_helper(otherindex, othername, + argtypes, resulttype) + # + return self._handle_oopspec_call(op, args, dict[oopspec_name]) + + # ---------- # VirtualRefs. def _handle_virtual_ref_call(self, op, oopspec_name, args): Modified: pypy/branch/fast-forward/pypy/jit/codewriter/support.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/support.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/support.py Thu Sep 30 00:16:20 2010 @@ -275,10 +275,86 @@ # ---------- strings and unicode ---------- - _ll_5_string_copy_contents = ll_rstr.copy_string_contents - _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode - _ll_5_unicode_copy_contents = ll_rstr.copy_unicode_contents + + def _ll_4_str_eq_slice_checknull(s1, start, length, s2): + """str1[start : start + length] == str2.""" + if not s2: + return 0 + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_nonnull(s1, start, length, s2): + """str1[start : start + length] == str2, assuming str2 != NULL.""" + chars2 = s2.chars + if len(chars2) != length: + return 0 + j = 0 + chars1 = s1.chars + while j < length: + if chars1[start + j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_4_str_eq_slice_char(s1, start, length, c2): + """str1[start : start + length] == c2.""" + if length != 1: + return 0 + if s1.chars[start] != c2: + return 0 + return 1 + + def _ll_2_str_eq_nonnull(s1, s2): + len1 = len(s1.chars) + len2 = len(s2.chars) + if len1 != len2: + return 0 + j = 0 + chars1 = s1.chars + chars2 = s2.chars + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 1 + return 1 + + def _ll_2_str_eq_nonnull_char(s1, c2): + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_checknull_char(s1, c2): + if not s1: + return 0 + chars = s1.chars + if len(chars) != 1: + return 0 + if chars[0] != c2: + return 0 + return 1 + + def _ll_2_str_eq_lengthok(s1, s2): + j = 0 + chars1 = s1.chars + chars2 = s2.chars + len1 = len(chars1) + while j < len1: + if chars1[j] != chars2[j]: + return 0 + j += 1 + return 1 # ---------- malloc with del ---------- Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py Thu Sep 30 00:16:20 2010 @@ -1,11 +1,16 @@ +import py import random from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.jit.codewriter.jtransform import Transformer from pypy.jit.metainterp.history import getkind -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rlist from pypy.translator.unsimplify import varoftype -from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter import heaptracker, effectinfo +from pypy.jit.codewriter.flatten import ListOfKind + +def const(x): + return Constant(x, lltype.typeOf(x)) class FakeRTyper: class type_system: name = 
'lltypesystem' @@ -17,6 +22,8 @@ return ('calldescr', FUNC, ARGS, RESULT) def fielddescrof(self, STRUCT, name): return ('fielddescr', STRUCT, name) + def arraydescrof(self, ARRAY): + return FakeDescr(('arraydescr', ARRAY)) def sizeof(self, STRUCT): return FakeDescr(('sizedescr', STRUCT)) @@ -67,6 +74,14 @@ def calldescr_canraise(self, calldescr): return False +class FakeBuiltinCallControl: + def guess_call_kind(self, op): + return 'builtin' + def getcalldescr(self, op, oopspecindex): + return 'calldescr-%d' % oopspecindex + def calldescr_canraise(self, calldescr): + return False + def test_optimize_goto_if_not(): v1 = Variable() @@ -107,7 +122,7 @@ assert block.operations == [] assert block.exitswitch == ('int_gt', v1, v2) assert block.exits == exits - assert exits[1].args == [Constant(True, lltype.Bool)] + assert exits[1].args == [const(True)] def test_optimize_goto_if_not__unknownop(): v3 = Variable(); v3.concretetype = lltype.Bool @@ -159,8 +174,8 @@ 'float_gt': ('float_gt', 'float_lt'), } v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: for name1, name2 in ops.items(): op = SpaceOperation(name1, [v1, v2], v3) op1 = Transformer(FakeCPU()).rewrite_operation(op) @@ -177,8 +192,8 @@ def test_symmetric_int_add_ovf(): v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]: - for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]: + for v1 in [varoftype(lltype.Signed), const(42)]: + for v2 in [varoftype(lltype.Signed), const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) op0, op1 = oplist @@ -218,7 +233,7 @@ def get_direct_call_op(argtypes, restype): FUNC = lltype.FuncType(argtypes, restype) fnptr = lltype.functionptr(FUNC, "g") # no graph - c_fnptr = Constant(fnptr, concretetype=lltype.typeOf(fnptr)) + c_fnptr = const(fnptr) vars = [varoftype(TYPE) for TYPE in argtypes] v_result = varoftype(restype) op = SpaceOperation('direct_call', [c_fnptr] + vars, v_result) @@ -465,7 +480,7 @@ v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) v3 = varoftype(lltype.Bool) - c0 = Constant(0, lltype.Signed) + c0 = const(0) # for opname, reducedname in [('int_eq', 'int_is_zero'), ('int_ne', 'int_is_true')]: @@ -488,7 +503,7 @@ v1 = varoftype(rclass.OBJECTPTR) v2 = varoftype(rclass.OBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.OBJECT), rclass.OBJECTPTR) + c0 = const(lltype.nullptr(rclass.OBJECT)) # for opname, reducedname in [('ptr_eq', 'ptr_iszero'), ('ptr_ne', 'ptr_nonzero')]: @@ -511,7 +526,7 @@ v1 = varoftype(rclass.NONGCOBJECTPTR) v2 = varoftype(rclass.NONGCOBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = Constant(lltype.nullptr(rclass.NONGCOBJECT), rclass.NONGCOBJECTPTR) + c0 = const(lltype.nullptr(rclass.NONGCOBJECT)) # for opname, reducedname in [('ptr_eq', 'int_is_zero'), ('ptr_ne', 'int_is_true')]: @@ -656,3 +671,119 @@ oplist = tr.rewrite_operation(op) assert oplist[0].opname == 'inline_call_ir_i' assert oplist[0].args[0] == 'somejitcode' + +def test_str_newstr(): + c_STR = Constant(rstr.STR, lltype.Void) + c_flavor = Constant({'flavor': 'gc'}, lltype.Void) + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Ptr(rstr.STR)) + op = SpaceOperation('malloc_varsize', [c_STR, c_flavor, v1], v2) + op1 = 
Transformer().rewrite_operation(op) + assert op1.opname == 'newstr' + assert op1.args == [v1] + assert op1.result == v2 + +def test_str_concat(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) + assert op1.result == v3 + +def test_unicode_concat(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.UNICODE) + FUNC = lltype.FuncType([PSTR, PSTR], PSTR) + func = lltype.functionptr(FUNC, 'll_strconcat', + _callable=rstr.LLHelpers.ll_strconcat) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + v3 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_r_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT + assert op1.args[2] == ListOfKind('ref', [v1, v2]) + assert op1.result == v3 + # + # check the callinfo_for_oopspec + got = effectinfo.callinfo_for_oopspec(effectinfo.EffectInfo.OS_UNI_CONCAT) + assert got[0] == op1.args[1] # the calldescr + assert heaptracker.int2adr(got[1]) == llmemory.cast_ptr_to_adr(func) + +def test_str_slice(): + # test that the oopspec is present and correctly transformed + PSTR = lltype.Ptr(rstr.STR) + INT = lltype.Signed + FUNC = lltype.FuncType([PSTR, INT, INT], PSTR) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) + v1 = varoftype(PSTR) + v2 = varoftype(INT) + v3 = varoftype(INT) + v4 = varoftype(PSTR) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_SLICE + assert op1.args[2] == ListOfKind('int', [v2, v3]) + assert op1.args[3] == ListOfKind('ref', [v1]) + assert op1.result == v4 + +def test_unicode_slice(): + # test that the oopspec is present and correctly transformed + PUNICODE = lltype.Ptr(rstr.UNICODE) + INT = lltype.Signed + FUNC = lltype.FuncType([PUNICODE, INT, INT], PUNICODE) + func = lltype.functionptr(FUNC, '_ll_stringslice', + _callable=rstr.LLHelpers._ll_stringslice) + v1 = varoftype(PUNICODE) + v2 = varoftype(INT) + v3 = varoftype(INT) + v4 = varoftype(PUNICODE) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_r' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_SLICE + assert op1.args[2] == ListOfKind('int', [v2, v3]) + assert op1.args[3] == ListOfKind('ref', [v1]) + assert op1.result == v4 + +def test_list_ll_arraycopy(): + from pypy.rlib.rgc import ll_arraycopy + LIST = lltype.GcArray(lltype.Signed) + 
PLIST = lltype.Ptr(LIST) + INT = lltype.Signed + FUNC = lltype.FuncType([PLIST]*2+[INT]*3, lltype.Void) + func = lltype.functionptr(FUNC, 'll_arraycopy', _callable=ll_arraycopy) + v1 = varoftype(PLIST) + v2 = varoftype(PLIST) + v3 = varoftype(INT) + v4 = varoftype(INT) + v5 = varoftype(INT) + v6 = varoftype(lltype.Void) + op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = tr.rewrite_operation(op) + assert op1.opname == 'residual_call_ir_v' + assert op1.args[0].value == func + assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY + assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) + assert op1.args[3] == ListOfKind('ref', [v1, v2]) Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py Thu Sep 30 00:16:20 2010 @@ -36,10 +36,16 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op): + def __init__(self, op, oopspecindex=0): self.op = op + self.oopspecindex = oopspecindex def __repr__(self): - return '' + if self.oopspecindex == 0: + return '' + else: + return '' % self.oopspecindex + def calldescr_canraise(self, calldescr): + return False def builtin_test(oopspec_name, args, RESTYPE, expected): v_result = varoftype(RESTYPE) @@ -99,7 +105,7 @@ varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ - arraycopy , $'myfunc', %r0, %r1, %i0, %i1, %i2, + residual_call_ir_v $'myfunc', , I[%i0, %i1, %i2], R[%r0, %r1] """) def test_fixed_getitem(): Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py (original) +++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py Thu Sep 30 00:16:20 2010 @@ -1,7 +1,8 @@ import py from pypy.rpython.lltypesystem import lltype +from pypy.rpython.annlowlevel import llstr from pypy.objspace.flow.model import Variable, Constant, SpaceOperation -from pypy.jit.codewriter.support import decode_builtin_call +from pypy.jit.codewriter.support import decode_builtin_call, LLtypeHelpers def newconst(x): return Constant(x, lltype.typeOf(x)) @@ -65,3 +66,70 @@ assert opargs == [newconst(myarray), newconst(2), vc, vi] #impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A)) #assert impl(myarray, 2, 'A', 5) == 42 * ord('A') + +def test_streq_slice_checknull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_checknull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + assert func(p1, 2, 1, llstr(None)) == False + +def test_streq_slice_nonnull(): + p1 = llstr("hello world") + p2 = llstr("wor") + func = LLtypeHelpers._ll_4_str_eq_slice_nonnull.im_func + assert func(p1, 6, 3, p2) == True + assert func(p1, 6, 2, p2) == False + assert func(p1, 5, 3, p2) == False + py.test.raises(AttributeError, func, p1, 2, 1, llstr(None)) + +def test_streq_slice_char(): + p1 = llstr("hello world") + func = LLtypeHelpers._ll_4_str_eq_slice_char.im_func + assert func(p1, 6, 3, "w") == False + assert func(p1, 6, 0, "w") == False + assert func(p1, 6, 1, "w") == True + assert func(p1, 6, 1, "x") == False + +def 
test_streq_nonnull(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_nonnull.im_func + assert func(p1, p1) == True + assert func(p1, p2) == True + assert func(p1, llstr("wrl")) == False + assert func(p1, llstr("world")) == False + assert func(p1, llstr("w")) == False + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) + +def test_streq_nonnull_char(): + func = LLtypeHelpers._ll_2_str_eq_nonnull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + py.test.raises(AttributeError, func, llstr(None), "x") + +def test_streq_checknull_char(): + func = LLtypeHelpers._ll_2_str_eq_checknull_char.im_func + assert func(llstr("wor"), "x") == False + assert func(llstr("w"), "x") == False + assert func(llstr(""), "x") == False + assert func(llstr("x"), "x") == True + assert func(llstr(None), "x") == False + +def test_streq_lengthok(): + p1 = llstr("wor") + p2 = llstr("wor") + assert p1 != p2 + func = LLtypeHelpers._ll_2_str_eq_lengthok.im_func + assert func(p1, p1) == True + assert func(p1, p2) == True + assert func(p1, llstr("wrl")) == False + py.test.raises(IndexError, func, p1, llstr("w")) + py.test.raises(AttributeError, func, p1, llstr(None)) + py.test.raises(AttributeError, func, llstr(None), p2) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py Thu Sep 30 00:16:20 2010 @@ -1024,10 +1024,6 @@ def bhimpl_arraylen_gc(cpu, array, arraydescr): return cpu.bh_arraylen_gc(arraydescr, array) - @arguments("cpu", "d", "i", "r", "r", "i", "i", "i", "d") - def bhimpl_arraycopy(cpu, calldescr, func, x1, x2, x3, x4, x5, arraydescr): - cpu.bh_call_v(func, calldescr, [x3, x4, x5], [x1, x2], None) - @arguments("cpu", "r", "d", "d", "i", returns="i") def bhimpl_getarrayitem_vable_i(cpu, vable, fielddescr, arraydescr, index): array = cpu.bh_getfield_gc_r(vable, fielddescr) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/compile.py Thu Sep 30 00:16:20 2010 @@ -51,7 +51,7 @@ def compile_new_loop(metainterp, old_loop_tokens, greenkey, start): """Try to compile a new loop by closing the current history back to the first operation. 
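The _ll_*_str_eq_* helpers added to support.py earlier in this commit, and exercised by the test_streq_* tests above, all implement variants of one comparison. In plain Python, ignoring the low-level string representation and keeping only the NULL check, the slice variant amounts to the following sketch (ordinary str objects stand in for rstr strings, None stands in for NULL):

    def str_eq_slice(s1, start, length, s2):
        # s1[start : start + length] == s2
        if s2 is None:
            return 0
        if len(s2) != length:
            return 0
        for j in range(length):
            if s1[start + j] != s2[j]:
                return 0
        return 1

    assert str_eq_slice("hello world", 6, 3, "wor") == 1
    assert str_eq_slice("hello world", 6, 2, "wor") == 0
    assert str_eq_slice("hello world", 2, 1, None) == 0

The nonnull, char and lengthok variants drop or specialize individual checks, which is what lets the JIT pick the cheapest helper it can prove safe for a given call site.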
- """ + """ history = metainterp.history loop = create_empty_loop(metainterp) loop.greenkey = greenkey @@ -65,7 +65,7 @@ jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token - loop.operations[-1].descr = loop_token # patch the target of the JUMP + loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) @@ -133,7 +133,7 @@ metainterp_sd.profiler.end_backend() if not we_are_translated(): metainterp_sd.stats.compiled() - metainterp_sd.log("compiled new bridge") + metainterp_sd.log("compiled new bridge") # ____________________________________________________________ @@ -177,7 +177,7 @@ class TerminatingLoopToken(LoopToken): terminating = True - + def __init__(self, nargs, finishdescr): self.specnodes = [prebuiltNotSpecNode]*nargs self.finishdescr = finishdescr @@ -233,14 +233,14 @@ self.metainterp_sd = metainterp_sd def store_final_boxes(self, guard_op, boxes): - guard_op.fail_args = boxes - self.guard_opnum = guard_op.opnum + guard_op.setfailargs(boxes) + self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): - assert guard_value_op.opnum == rop.GUARD_VALUE - box = guard_value_op.args[0] + assert guard_value_op.getopnum() == rop.GUARD_VALUE + box = guard_value_op.getarg(0) try: - i = guard_value_op.fail_args.index(box) + i = guard_value_op.getfailargs().index(box) except ValueError: return # xxx probably very rare else: @@ -508,7 +508,7 @@ def compile_new_bridge(metainterp, old_loop_tokens, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. - """ + """ # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. # @@ -540,13 +540,14 @@ op = new_loop.operations[-1] if not isinstance(target_loop_token, TerminatingLoopToken): # normal case - op.descr = target_loop_token # patch the jump target + op.setdescr(target_loop_token) # patch the jump target else: # The target_loop_token is a pseudo loop token, # e.g. loop_tokens_done_with_this_frame_void[0] # Replace the operation with the real operation we want, i.e. 
a FINISH descr = target_loop_token.finishdescr - new_op = ResOperation(rop.FINISH, op.args, None, descr=descr) + args = op.getarglist() + new_op = ResOperation(rop.FINISH, args, None, descr=descr) new_loop.operations[-1] = new_op # ____________________________________________________________ @@ -597,6 +598,6 @@ ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken) ] - operations[1].fail_args = [] + operations[1].setfailargs([]) cpu.compile_loop(inputargs, operations, loop_token) return loop_token Modified: pypy/branch/fast-forward/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/executor.py Thu Sep 30 00:16:20 2010 @@ -2,7 +2,7 @@ """ import py -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import lltype, llmemory, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask @@ -165,12 +165,6 @@ def do_new_with_vtable(cpu, _, clsbox): return BoxPtr(exec_new_with_vtable(cpu, clsbox)) -def do_arraycopy(cpu, _, calldescr, funcbox, x1box, x2box, - x3box, x4box, x5box, arraydescr): - cpu.bh_call_v(funcbox.getint(), calldescr, - [x3box.getint(), x4box.getint(), x5box.getint()], - [x1box.getref_base(), x2box.getref_base()], None) - def do_int_add_ovf(cpu, metainterp, box1, box2): # the overflow operations can be called without a metainterp, if an # overflow cannot occur @@ -209,6 +203,24 @@ def do_same_as(cpu, _, box): return box.clonebox() +def do_copystrcontent(cpu, _, srcbox, dstbox, + srcstartbox, dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.STR)) + dst = dstbox.getptr(lltype.Ptr(rstr.STR)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + +def do_copyunicodecontent(cpu, _, srcbox, dstbox, + srcstartbox, dststartbox, lengthbox): + src = srcbox.getptr(lltype.Ptr(rstr.UNICODE)) + dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE)) + srcstart = srcstartbox.getint() + dststart = dststartbox.getint() + length = lengthbox.getint() + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + # ____________________________________________________________ ##def do_force_token(cpu): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/gc.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/gc.py Thu Sep 30 00:16:20 2010 @@ -19,6 +19,9 @@ class GC_hybrid(GcDescription): malloc_zero_filled = True +class GC_minimark(GcDescription): + malloc_zero_filled = True + def get_description(config): name = config.translation.gc Modified: pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py Thu Sep 30 00:16:20 2010 @@ -17,13 +17,13 @@ for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): - graphs.append((SubGraph(op.descr._debug_suboperations), + 
graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) graphpage = ResOpGraphPage(graphs, errmsg) graphpage.display() def is_interesting_guard(op): - return hasattr(op.descr, '_debug_suboperations') + return hasattr(op.getdescr(), '_debug_suboperations') class ResOpGraphPage(GraphPage): @@ -76,7 +76,7 @@ for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: if not last_was_mergepoint: last_was_mergepoint = True self.mark_starter(graphindex, i) @@ -155,7 +155,7 @@ op = operations[opindex] lines.append(repr(op)) if is_interesting_guard(op): - tgt = op.descr._debug_suboperations[0] + tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] self.genedge((graphindex, opstartindex), (tgt_g, tgt_i), @@ -167,8 +167,8 @@ self.genedge((graphindex, opstartindex), (graphindex, opindex)) break - if op.opnum == rop.JUMP: - tgt = op.descr + if op.getopnum() == rop.JUMP: + tgt = op.getdescr() tgt_g = -1 if tgt is None: tgt_g = graphindex @@ -191,7 +191,8 @@ def getlinks(self): boxes = {} for op in self.all_operations: - for box in op.args + [op.result]: + args = op.getarglist() + [op.result] + for box in args: if getattr(box, 'is_box', False): boxes[box] = True links = {} Modified: pypy/branch/fast-forward/pypy/jit/metainterp/history.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/history.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/history.py Thu Sep 30 00:16:20 2010 @@ -532,7 +532,7 @@ class BoxFloat(Box): type = FLOAT _attrs_ = ('value',) - + def __init__(self, floatval=0.0): assert isinstance(floatval, float) self.value = floatval @@ -685,6 +685,19 @@ return llmemory.cast_adr_to_int(adr, "emulated") return i +def get_const_ptr_for_string(s): + from pypy.rpython.annlowlevel import llstr + if not we_are_translated(): + try: + return _const_ptr_for_string[s] + except KeyError: + pass + result = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, llstr(s))) + if not we_are_translated(): + _const_ptr_for_string[s] = result + return result +_const_ptr_for_string = {} + # ____________________________________________________________ # The TreeLoop class contains a loop or a generalized loop, i.e. 
a tree @@ -759,33 +772,34 @@ assert len(seen) == len(inputargs), ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) - + @staticmethod def check_consistency_of_branch(operations, seen): "NOT_RPYTHON" for op in operations: - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) if isinstance(box, Box): assert box in seen if op.is_guard(): - assert op.descr is not None - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + assert op.getdescr() is not None + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations TreeLoop.check_consistency_of_branch(ops, seen.copy()) - for box in op.fail_args or []: + for box in op.getfailargs() or []: if box is not None: assert isinstance(box, Box) assert box in seen else: - assert op.fail_args is None + assert op.getfailargs() is None box = op.result if box is not None: assert isinstance(box, Box) assert box not in seen seen[box] = True assert operations[-1].is_final() - if operations[-1].opnum == rop.JUMP: - target = operations[-1].descr + if operations[-1].getopnum() == rop.JUMP: + target = operations[-1].getdescr() if target is not None: assert isinstance(target, LoopToken) @@ -793,7 +807,8 @@ # RPython-friendly print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: - print '\t', op.getopname(), self._dump_args(op.args), \ + args = op.getarglist() + print '\t', op.getopname(), self._dump_args(args), \ self._dump_box(op.result) def _dump_args(self, boxes): @@ -809,14 +824,14 @@ return '<%s>' % (self.name,) def _list_all_operations(result, operations, omit_finish=True): - if omit_finish and operations[-1].opnum == rop.FINISH: + if omit_finish and operations[-1].getopnum() == rop.FINISH: # xxx obscure return result.extend(operations) for op in operations: - if op.is_guard() and op.descr: - if hasattr(op.descr, '_debug_suboperations'): - ops = op.descr._debug_suboperations + if op.is_guard() and op.getdescr(): + if hasattr(op.getdescr(), '_debug_suboperations'): + ops = op.getdescr()._debug_suboperations _list_all_operations(result, ops, omit_finish) # ____________________________________________________________ @@ -885,7 +900,7 @@ self.aborted_count += 1 def entered(self): - self.enter_count += 1 + self.enter_count += 1 def compiled(self): self.compiled_count += 1 @@ -898,7 +913,7 @@ def add_new_loop(self, loop): self.loops.append(loop) - + # test read interface def get_all_loops(self): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/logger.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/logger.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/logger.py Thu Sep 30 00:16:20 2010 @@ -79,27 +79,27 @@ debug_print('[' + args + ']') for i in range(len(operations)): op = operations[i] - if op.opnum == rop.DEBUG_MERGE_POINT: - loc = op.args[0]._get_str() + if op.getopnum() == rop.DEBUG_MERGE_POINT: + loc = op.getarg(0)._get_str() debug_print("debug_merge_point('%s')" % (loc,)) continue - args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args]) + args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())]) if op.result is not None: res = self.repr_of_arg(memo, op.result) + " = " else: res = "" is_guard = op.is_guard() - if op.descr is not None: - descr = op.descr + if op.getdescr() is not None: + descr = op.getdescr() if is_guard and self.guard_number: index = 
self.metainterp_sd.cpu.get_fail_descr_number(descr) r = "" % index else: r = self.repr_of_descr(descr) args += ', descr=' + r - if is_guard and op.fail_args is not None: + if is_guard and op.getfailargs() is not None: fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg) - for arg in op.fail_args]) + ']' + for arg in op.getfailargs()]) + ']' else: fail_args = '' debug_print(res + op.getopname() + Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py Thu Sep 30 00:16:20 2010 @@ -43,7 +43,7 @@ finder.find_nodes_bridge(bridge) for old_loop_token in old_loop_tokens: if finder.bridge_matches(old_loop_token.specnodes): - bridge.operations[-1].descr = old_loop_token # patch jump target + bridge.operations[-1].setdescr(old_loop_token) # patch jump target optimize_bridge_1(metainterp_sd, bridge) return old_loop_token return None Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py Thu Sep 30 00:16:20 2010 @@ -144,7 +144,7 @@ def find_nodes(self, operations): for op in operations: - opnum = op.opnum + opnum = op.getopnum() for value, func in find_nodes_ops: if opnum == value: func(self, op) @@ -154,77 +154,79 @@ def find_nodes_default(self, op): if op.is_always_pure(): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: we can constant-fold - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.set_constant_node(op.result, resbox.constbox()) # default case: mark the arguments as escaping - for box in op.args: - self.getnode(box).mark_escaped() + for i in range(op.numargs()): + self.getnode(op.getarg(i)).mark_escaped() def find_nodes_no_escape(self, op): pass # for operations that don't escape their arguments find_nodes_PTR_EQ = find_nodes_no_escape find_nodes_PTR_NE = find_nodes_no_escape - find_nodes_INSTANCEOF = find_nodes_no_escape + ##find_nodes_INSTANCEOF = find_nodes_no_escape find_nodes_GUARD_NONNULL = find_nodes_no_escape find_nodes_GUARD_ISNULL = find_nodes_no_escape def find_nodes_NEW_WITH_VTABLE(self, op): instnode = InstanceNode() - box = op.args[0] + box = op.getarg(0) assert isinstance(box, Const) instnode.knownclsbox = box self.nodes[op.result] = instnode def find_nodes_NEW(self, op): instnode = InstanceNode() - instnode.structdescr = op.descr + instnode.structdescr = op.getdescr() self.nodes[op.result] = instnode def find_nodes_NEW_ARRAY(self, op): - lengthbox = op.args[0] + lengthbox = op.getarg(0) lengthbox = self.get_constant_box(lengthbox) if lengthbox is None: return # var-sized arrays are not virtual arraynode = InstanceNode() arraynode.arraysize = lengthbox.getint() - arraynode.arraydescr = op.descr + arraynode.arraydescr = op.getdescr() self.nodes[op.result] = arraynode def find_nodes_ARRAYLEN_GC(self, op): - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if 
arraynode.arraydescr is not None: resbox = ConstInt(arraynode.arraysize) self.set_constant_node(op.result, resbox) def find_nodes_GUARD_CLASS(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownclsbox = box def find_nodes_GUARD_VALUE(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.fromstart: # only useful (and safe) in this case - box = op.args[1] + box = op.getarg(1) assert isinstance(box, Const) instnode.knownvaluebox = box def find_nodes_SETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) - fieldnode = self.getnode(op.args[1]) + instnode = self.getnode(op.getarg(0)) + fieldnode = self.getnode(op.getarg(1)) if instnode.escaped: fieldnode.mark_escaped() return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is None: instnode.curfields = {} @@ -232,10 +234,10 @@ instnode.add_escape_dependency(fieldnode) def find_nodes_GETFIELD_GC(self, op): - instnode = self.getnode(op.args[0]) + instnode = self.getnode(op.getarg(0)) if instnode.escaped: return # nothing to be gained from tracking the field - field = op.descr + field = op.getdescr() assert isinstance(field, AbstractValue) if instnode.curfields is not None and field in instnode.curfields: fieldnode = instnode.curfields[field] @@ -254,13 +256,13 @@ find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC def find_nodes_SETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) - itemnode = self.getnode(op.args[2]) + arraynode = self.getnode(op.getarg(0)) + itemnode = self.getnode(op.getarg(2)) if arraynode.escaped: itemnode.mark_escaped() return # nothing to be gained from tracking the item @@ -270,12 +272,12 @@ arraynode.add_escape_dependency(itemnode) def find_nodes_GETARRAYITEM_GC(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) indexbox = self.get_constant_box(indexbox) if indexbox is None: self.find_nodes_default(op) # not a Const index return - arraynode = self.getnode(op.args[0]) + arraynode = self.getnode(op.getarg(0)) if arraynode.escaped: return # nothing to be gained from tracking the item index = indexbox.getint() @@ -298,13 +300,15 @@ def find_nodes_JUMP(self, op): # only set up the 'unique' field of the InstanceNodes; # real handling comes later (build_result_specnodes() for loops). - for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).set_unique_nodes() def find_nodes_FINISH(self, op): # only for bridges, and only for the ones that end in a 'return' # or 'raise'; all other cases end with a JUMP. 
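Most of the hunks in the metainterp files of this commit follow a single mechanical pattern: resoperations are no longer accessed through the .args, .descr and .fail_args attributes but through accessor methods. A toy class showing just the new interface (the real class lives in pypy/jit/metainterp/resoperation.py; this is only a stand-in to make the pattern explicit):

    class ToyResOp(object):
        def __init__(self, opnum, args, result, descr=None):
            self._opnum = opnum
            self._args = list(args)
            self.result = result
            self._descr = descr
            self._fail_args = None

        def getopnum(self):        return self._opnum
        def numargs(self):         return len(self._args)
        def getarg(self, i):       return self._args[i]
        def getarglist(self):      return list(self._args)
        def getdescr(self):        return self._descr
        def setdescr(self, descr): self._descr = descr
        def getfailargs(self):     return self._fail_args
        def setfailargs(self, b):  self._fail_args = b

    op = ToyResOp('JUMP', ['i0', 'i1'], None)
    op.setdescr('looptoken')                  # "patch the jump target"
    assert [op.getarg(i) for i in range(op.numargs())] == ['i0', 'i1']
    assert op.getdescr() == 'looptoken'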
- for box in op.args: + for i in range(op.numargs()): + box = op.getarg(i) self.getnode(box).unique = UNIQUE_NO find_nodes_ops = _findall(NodeFinder, 'find_nodes_') @@ -315,16 +319,17 @@ class PerfectSpecializationFinder(NodeFinder): node_fromstart = InstanceNode(fromstart=True) - def find_nodes_loop(self, loop): + def find_nodes_loop(self, loop, build_specnodes=True): self._loop = loop self.setup_input_nodes(loop.inputargs) self.find_nodes(loop.operations) - self.build_result_specnodes(loop) + if build_specnodes: + self.build_result_specnodes(loop) def show(self): from pypy.jit.metainterp.viewnode import viewnodes, view op = self._loop.operations[-1] - assert op.opnum == rop.JUMP + assert op.getopnum() == rop.JUMP exitnodes = [self.getnode(arg) for arg in op.args] viewnodes(self.inputnodes, exitnodes) if hasattr(self._loop.token, "specnodes"): @@ -343,14 +348,14 @@ # Build the list of specnodes based on the result # computed by NodeFinder.find_nodes(). op = loop.operations[-1] - assert op.opnum == rop.JUMP - assert len(self.inputnodes) == len(op.args) + assert op.getopnum() == rop.JUMP + assert len(self.inputnodes) == op.numargs() while True: self.restart_needed = False specnodes = [] - for i in range(len(op.args)): + for i in range(op.numargs()): inputnode = self.inputnodes[i] - exitnode = self.getnode(op.args[i]) + exitnode = self.getnode(op.getarg(i)) specnodes.append(self.intersect(inputnode, exitnode)) if not self.restart_needed: break @@ -562,9 +567,9 @@ def bridge_matches(self, nextloop_specnodes): jump_op = self.jump_op - assert len(jump_op.args) == len(nextloop_specnodes) + assert jump_op.numargs() == len(nextloop_specnodes) for i in range(len(nextloop_specnodes)): - exitnode = self.getnode(jump_op.args[i]) + exitnode = self.getnode(jump_op.getarg(i)) if not nextloop_specnodes[i].matches_instance_node(exitnode): return False return True Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 30 00:16:20 2010 @@ -3,6 +3,7 @@ from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap +from pypy.jit.metainterp.optimizeopt.string import OptString def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -13,6 +14,7 @@ optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), + OptString(), OptHeap(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) @@ -23,4 +25,3 @@ expect 'specnodes' on the bridge. 
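optimize_loop_1() above now inserts OptString into the chain of optimizations. Each pass in that chain sees every operation, may rewrite or swallow it, and forwards the rest to the next pass via emit_operation(). A stripped-down model of the chaining (the OptDropDebugMergePoint pass below is invented purely for the demonstration and is not one of the real passes):

    class Optimization(object):
        def __init__(self):
            self.next = None                   # next pass in the chain

        def emit_operation(self, op):
            if self.next is not None:
                self.next.propagate_forward(op)

        def propagate_forward(self, op):
            self.emit_operation(op)            # default: pass through

    class OptDropDebugMergePoint(Optimization):
        def propagate_forward(self, op):
            if op[0] != 'debug_merge_point':   # swallow this op, keep the rest
                self.emit_operation(op)

    class Collector(Optimization):
        def __init__(self):
            Optimization.__init__(self)
            self.newoperations = []

        def propagate_forward(self, op):
            self.newoperations.append(op)

    def optimize(passes, operations):
        sink = Collector()
        for prev, nxt in zip(passes, passes[1:] + [sink]):
            prev.next = nxt
        for op in operations:
            passes[0].propagate_forward(op)
        return sink.newoperations

    ops = [('int_add', 'i0', 'i1'), ('debug_merge_point', 'loc')]
    assert optimize([OptDropDebugMergePoint()], ops) == [('int_add', 'i0', 'i1')]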
""" optimize_loop_1(metainterp_sd, bridge, False) - Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py Thu Sep 30 00:16:20 2010 @@ -45,7 +45,7 @@ op = self.lazy_setfields.get(descr, None) if op is None: return None - return self.getvalue(op.args[1]) + return self.getvalue(op.getarg(1)) return d.get(value, None) def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): @@ -105,7 +105,7 @@ if op.is_guard(): self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return - opnum = op.opnum + opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or opnum == rop.SETARRAYITEM_GC or opnum == rop.DEBUG_MERGE_POINT): @@ -117,7 +117,7 @@ if opnum == rop.CALL_ASSEMBLER: effectinfo = None else: - effectinfo = op.descr.get_extra_info() + effectinfo = op.getdescr().get_extra_info() if effectinfo is not None: # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -142,7 +142,7 @@ return self.force_all_lazy_setfields() elif op.is_final() or (not we_are_translated() and - op.opnum < 0): # escape() operations + op.getopnum() < 0): # escape() operations self.force_all_lazy_setfields() self.clean_caches() @@ -166,10 +166,11 @@ # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.opnum + opnum = prevop.getopnum() + lastop_args = lastop.getarglist() if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE or prevop.is_ovf()) - and prevop.result not in lastop.args): + and prevop.result not in lastop_args): newoperations[-2] = lastop newoperations[-1] = prevop @@ -189,9 +190,9 @@ # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is stored # into a field of a non-virtual object. - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) assert not value.is_virtual() # it must be a non-virtual - fieldvalue = self.getvalue(op.args[1]) + fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py pendingfields.append((descr, value.box, @@ -202,20 +203,20 @@ def force_lazy_setfield_if_necessary(self, op, value, write=False): try: - op1 = self.lazy_setfields[op.descr] + op1 = self.lazy_setfields[op.getdescr()] except KeyError: if write: - self.lazy_setfields_descrs.append(op.descr) + self.lazy_setfields_descrs.append(op.getdescr()) else: - if self.getvalue(op1.args[0]) is not value: - self.force_lazy_setfield(op.descr) + if self.getvalue(op1.getarg(0)) is not value: + self.force_lazy_setfield(op.getdescr()) def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) self.force_lazy_setfield_if_necessary(op, value) # check if the field was read from another getfield_gc just before # or has been written to recently - fieldvalue = self.read_cached_field(op.descr, value) + fieldvalue = self.read_cached_field(op.getdescr(), value) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return @@ -225,38 +226,38 @@ self.emit_operation(op) # FIXME: These might need constant propagation? 
# then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.descr, value, fieldvalue) + self.cache_field_value(op.getdescr(), value, fieldvalue) def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(1)) self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.descr] = op + self.lazy_setfields[op.getdescr()] = op # remember the result of future reads of the field - self.cache_field_value(op.descr, value, fieldvalue, write=True) + self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) - indexvalue = self.getvalue(op.args[1]) - fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue) + value = self.getvalue(op.getarg(0)) + indexvalue = self.getvalue(op.getarg(1)) + fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return ###self.optimizer.optimize_default(op) self.emit_operation(op) # FIXME: These might need constant propagation? fieldvalue = self.getvalue(op.result) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue) def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[2]) - indexvalue = self.getvalue(op.args[1]) - self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue, + value = self.getvalue(op.getarg(0)) + fieldvalue = self.getvalue(op.getarg(2)) + indexvalue = self.getvalue(op.getarg(1)) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 30 00:16:20 2010 @@ -10,7 +10,7 @@ remove redundant guards""" def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -31,7 +31,7 @@ op = self.optimizer.producer[box] except KeyError: return - opnum = op.opnum + opnum = op.getopnum() for value, func in propagate_bounds_ops: if opnum == value: func(self, op) @@ -39,14 +39,14 @@ def optimize_GUARD_TRUE(self, op): self.emit_operation(op) - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE def optimize_INT_AND(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) @@ -60,74 +60,74 @@ r.intbound.intersect(IntBound(0,val)) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) 
r.intbound.intersect(v1.intbound.sub_bound(v2.intbound)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.add_bound(v2.intbound)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.mul_bound(v2.intbound)) def optimize_INT_ADD_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_ADD and remove guard - op.opnum = rop.INT_ADD + op = op.copy_and_change(rop.INT_ADD) self.skip_nextop() - self.optimize_INT_ADD(op) + self.optimize_INT_ADD(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_SUB_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_SUB and remove guard - op.opnum = rop.INT_SUB + op = op.copy_and_change(rop.INT_SUB) self.skip_nextop() - self.optimize_INT_SUB(op) + self.optimize_INT_SUB(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) if resbound.has_lower and resbound.has_upper and \ - self.nextop().opnum == rop.GUARD_NO_OVERFLOW: + self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW: # Transform into INT_MUL and remove guard - op.opnum = rop.INT_MUL + op = op.copy_and_change(rop.INT_MUL) self.skip_nextop() - self.optimize_INT_MUL(op) + self.optimize_INT_MUL(op) # emit the op else: self.emit_operation(op) r = self.getvalue(op.result) r.intbound.intersect(resbound) def optimize_INT_LT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_ge(v2.intbound): @@ -136,8 +136,8 @@ self.emit_operation(op) def optimize_INT_GT(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_le(v2.intbound): @@ -146,8 +146,8 @@ self.emit_operation(op) def optimize_INT_LE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_le(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): @@ -156,8 +156,8 @@ 
self.emit_operation(op) def optimize_INT_GE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_ge(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -166,8 +166,8 @@ self.emit_operation(op) def optimize_INT_EQ(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) elif v1.intbound.known_lt(v2.intbound): @@ -176,8 +176,8 @@ self.emit_operation(op) def optimize_INT_NE(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): @@ -192,115 +192,114 @@ optimize_STRLEN = optimize_ARRAYLEN_GC - def make_int_lt(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_lt(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_lt(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_gt(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - - def make_int_le(self, args): - v1 = self.getvalue(args[0]) - v2 = self.getvalue(args[1]) + def make_int_le(self, box1, box2): + v1 = self.getvalue(box1) + v2 = self.getvalue(box2) if v1.intbound.make_le(v2.intbound): - self.propagate_bounds_backward(args[0]) + self.propagate_bounds_backward(box1) if v2.intbound.make_ge(v1.intbound): - self.propagate_bounds_backward(args[1]) + self.propagate_bounds_backward(box2) - def make_int_gt(self, args): - self.make_int_lt([args[1], args[0]]) + def make_int_gt(self, box1, box2): + self.make_int_lt(box2, box1) - def make_int_ge(self, args): - self.make_int_le([args[1], args[0]]) + def make_int_ge(self, box1, box2): + self.make_int_le(box2, box1) def propagate_bounds_INT_LT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) else: - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GT(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) else: - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_LE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_le(op.args) + self.make_int_le(op.getarg(0), op.getarg(1)) else: - self.make_int_gt(op.args) + self.make_int_gt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_GE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - self.make_int_ge(op.args) + self.make_int_ge(op.getarg(0), op.getarg(1)) else: - self.make_int_lt(op.args) + self.make_int_lt(op.getarg(0), op.getarg(1)) def propagate_bounds_INT_EQ(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_1): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - 
self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_NE(self, op): r = self.getvalue(op.result) if r.is_constant(): if r.box.same_constant(CONST_0): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.intbound.intersect(v2.intbound): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) if v2.intbound.intersect(v1.intbound): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.sub_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.add_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.sub_bound(v1.intbound).mul(-1) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) def propagate_bounds_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) r = self.getvalue(op.result) b = r.intbound.div_bound(v2.intbound) if v1.intbound.intersect(b): - self.propagate_bounds_backward(op.args[0]) + self.propagate_bounds_backward(op.getarg(0)) b = r.intbound.div_bound(v1.intbound) if v2.intbound.intersect(b): - self.propagate_bounds_backward(op.args[1]) + self.propagate_bounds_backward(op.getarg(1)) propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 30 00:16:20 2010 @@ -12,17 +12,19 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded +from pypy.tool.pairtype import extendabletype LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays -LEVEL_CONSTANT = '\x03' +LEVEL_CONSTANT = '\x03' import sys MAXINT = sys.maxint MININT = -sys.maxint - 1 - + class OptValue(object): + __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') last_guard_index = -1 @@ -36,7 +38,7 @@ if isinstance(box, Const): self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT - + 
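# -- editorial sketch, not part of the patch --------------------------------
# The new `__metaclass__ = extendabletype` on OptValue (and the
# `class __extend__(SpecNode)` trick used in virtualize.py further down)
# lets other modules add methods to an existing class instead of
# subclassing it.  A minimal stand-in for pypy.tool.pairtype.extendabletype,
# simplified to show only that behaviour:

class extendabletype(type):
    def __new__(cls, name, bases, body):
        if name == '__extend__':
            # fold the class body into each base class instead of
            # creating a new class
            for base in bases:
                for key, value in body.items():
                    if key != '__module__':
                        setattr(base, key, value)
            return None
        return super(extendabletype, cls).__new__(cls, name, bases, body)

class Value(object):              # stands in for OptValue here
    __metaclass__ = extendabletype

class __extend__(Value):          # adds a method to Value itself
    def is_virtual(self):
        return False

assert Value().is_virtual() is False
# ----------------------------------------------------------------------------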
def force_box(self): return self.box @@ -126,6 +128,7 @@ def setitem(self, index, value): raise NotImplementedError + class ConstantValue(OptValue): def __init__(self, box): self.make_constant(box) @@ -134,6 +137,7 @@ CONST_1 = ConstInt(1) CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(ConstFloat(0.0)) +CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) @@ -171,7 +175,7 @@ def new_const_item(self, arraydescr): return self.optimizer.new_const_item(arraydescr) - + def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op @@ -184,7 +188,7 @@ def setup(self, virtuals): pass - + class Optimizer(Optimization): def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True): @@ -249,6 +253,7 @@ return None def make_equal_to(self, box, value): + assert isinstance(value, OptValue) assert box not in self.values self.values[box] = value @@ -306,9 +311,12 @@ # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + def send_extra_operation(self, op): + self.first_optimization.propagate_forward(op) + def propagate_forward(self, op): self.producer[op.result] = op - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -323,15 +331,15 @@ self._emit_operation(op) def _emit_operation(self, op): - for i in range(len(op.args)): - arg = op.args[i] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: box = self.values[arg].force_box() - op.args[i] = box + op.setarg(i, box) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.store_final_boxes_in_guard(op) + op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True elif op.returns_bool_result(): @@ -340,7 +348,7 @@ def store_final_boxes_in_guard(self, op): ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard() - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) newboxes = modifier.finish(self.values, self.pendingfields) @@ -348,49 +356,54 @@ compile.giveup() descr.store_final_boxes(op, newboxes) # - if op.opnum == rop.GUARD_VALUE: - if self.getvalue(op.args[0]) in self.bool_boxes: + if op.getopnum() == rop.GUARD_VALUE: + if self.getvalue(op.getarg(0)) in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. # This is done after the operation is emitted to let # store_final_boxes_in_guard set the guard_opnum field of the # descr to the original rop.GUARD_VALUE. - constvalue = op.args[1].getint() + constvalue = op.getarg(1).getint() if constvalue == 0: opnum = rop.GUARD_FALSE elif constvalue == 1: opnum = rop.GUARD_TRUE else: raise AssertionError("uh?") - op.opnum = opnum - op.args = [op.args[0]] + newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) + newop.setfailargs(op.getfailargs()) + return newop else: # a real GUARD_VALUE. Make it use one counter per value. 
descr.make_a_counter_per_value(op) + return op def make_args_key(self, op): - args = op.args[:] - for i in range(len(args)): - arg = args[i] + args = [] + for i in range(op.numargs()): + arg = op.getarg(i) if arg in self.values: - args[i] = self.values[arg].get_key_box() - args.append(ConstInt(op.opnum)) + args.append(self.values[arg].get_key_box()) + else: + args.append(arg) + args.append(ConstInt(op.getopnum())) return args - + def optimize_default(self, op): canfold = op.is_always_pure() is_ovf = op.is_ovf() if is_ovf: nextop = self.loop.operations[self.i + 1] - canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW + canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW if canfold: - for arg in op.args: - if self.get_constant_box(arg) is None: + for i in range(op.numargs()): + if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(arg) for arg in op.args] + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] resbox = execute_nonspec(self.cpu, None, - op.opnum, argboxes, op.descr) + op.getopnum(), argboxes, op.getdescr()) self.make_constant(op.result, resbox.constbox()) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard @@ -399,8 +412,8 @@ # did we do the exact same operation already? args = self.make_args_key(op) oldop = self.pure_operations.get(args, None) - if oldop is not None and oldop.descr is op.descr: - assert oldop.opnum == op.opnum + if oldop is not None and oldop.getdescr() is op.getdescr(): + assert oldop.getopnum() == op.getopnum() self.make_equal_to(op.result, self.getvalue(oldop.result)) if is_ovf: self.i += 1 # skip next operation, it is the unneeded guard Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 30 00:16:20 2010 @@ -14,7 +14,7 @@ if self.find_rewritable_bool(op, args): return - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) @@ -24,7 +24,7 @@ def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): value = self.getvalue(oldop.result) if value.is_constant(): if value.box.same_constant(CONST_1): @@ -39,7 +39,7 @@ def find_rewritable_bool(self, op, args): try: - oldopnum = opboolinvers[op.opnum] + oldopnum = opboolinvers[op.getopnum()] targs = [args[0], args[1], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -47,17 +47,17 @@ pass try: - oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL targs = [args[1], args[0], ConstInt(oldopnum)] oldop = self.optimizer.pure_operations.get(targs, None) - if oldop is not None and oldop.descr is op.descr: + if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) return True except KeyError: pass try: - oldopnum = opboolinvers[opboolreflex[op.opnum]] + oldopnum = opboolinvers[opboolreflex[op.getopnum()]] targs = [args[1], args[0], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True @@ -67,16 +67,16 @@ return False def optimize_INT_AND(self, op): - v1 = 
self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) else: self.emit_operation(op) def optimize_INT_OR(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v1.is_null(): self.make_equal_to(op.result, v2) elif v2.is_null(): @@ -85,20 +85,20 @@ self.emit_operation(op) def optimize_INT_SUB(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) if v2.is_constant() and v2.box.getint() == 0: self.make_equal_to(op.result, v1) else: self.emit_operation(op) # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1]) + self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) def optimize_INT_ADD(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 0 the result is the other side. if v1.is_constant() and v1.box.getint() == 0: @@ -109,12 +109,12 @@ self.emit_operation(op) # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0]) - self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1]) + self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) def optimize_INT_MUL(self, op): - v1 = self.getvalue(op.args[0]) - v2 = self.getvalue(op.args[1]) + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) # If one side of the op is 1 the result is the other side. 
if v1.is_constant() and v1.box.getint() == 1: @@ -128,18 +128,21 @@ self.emit_operation(op) def optimize_CALL_PURE(self, op): - for arg in op.args: + for i in range(op.numargs()): + arg = op.getarg(i) if self.get_constant_box(arg) is None: break else: # all constant arguments: constant-fold away - self.make_constant(op.result, op.args[0]) + self.make_constant(op.result, op.getarg(0)) return # replace CALL_PURE with just CALL - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - op.descr)) + args = op.getarglist()[1:] + self.emit_operation(ResOperation(rop.CALL, args, op.result, + op.getdescr())) + def optimize_guard(self, op, constbox, emit_operation=True): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_constant(): box = value.box assert isinstance(box, Const) @@ -151,7 +154,7 @@ value.make_constant(constbox) def optimize_GUARD_ISNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_null(): return elif value.is_nonnull(): @@ -160,7 +163,7 @@ value.make_constant(self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_NONNULL(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_nonnull(): return elif value.is_null(): @@ -169,25 +172,25 @@ value.make_nonnull(len(self.optimizer.newoperations) - 1) def optimize_GUARD_VALUE(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) emit_operation = True if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. # replace the original guard with a guard_value old_guard_op = self.optimizer.newoperations[value.last_guard_index] - old_opnum = old_guard_op.opnum - old_guard_op.opnum = rop.GUARD_VALUE - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(old_guard_op) + descr.make_a_counter_per_value(new_guard_op) emit_operation = False - constbox = op.args[1] + constbox = op.getarg(1) assert isinstance(constbox, Const) self.optimize_guard(op, constbox, emit_operation) @@ -198,8 +201,8 @@ self.optimize_guard(op, CONST_0) def optimize_GUARD_CLASS(self, op): - value = self.getvalue(op.args[0]) - expectedclassbox = op.args[1] + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) realclassbox = value.get_constant_class(self.optimizer.cpu) if realclassbox is not None: @@ -213,15 +216,16 @@ # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. old_guard_op = self.optimizer.newoperations[value.last_guard_index] - if old_guard_op.opnum == rop.GUARD_NONNULL: + if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. 
- old_guard_op.opnum = rop.GUARD_NONNULL_CLASS - old_guard_op.args = [old_guard_op.args[0], op.args[1]] + new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.newoperations[value.last_guard_index] = new_guard_op # hack hack hack. Change the guard_opnum on - # old_guard_op.descr so that when resuming, + # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = old_guard_op.descr + descr = new_guard_op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS emit_operation = False @@ -239,18 +243,18 @@ self.optimizer.exception_might_have_happened = False def optimize_CALL_LOOPINVARIANT(self, op): - funcvalue = self.getvalue(op.args[0]) + funcvalue = self.getvalue(op.getarg(0)) if not funcvalue.is_constant(): self.emit_operation(op) return - key = make_hashable_int(op.args[0].getint()) + key = make_hashable_int(op.getarg(0).getint()) resvalue = self.optimizer.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this - op.opnum = rop.CALL + op = op.copy_and_change(rop.CALL) self.emit_operation(op) resvalue = self.getvalue(op.result) self.optimizer.loop_invariant_results[key] = resvalue @@ -265,17 +269,17 @@ self.emit_operation(op) def optimize_INT_IS_TRUE(self, op): - if self.getvalue(op.args[0]) in self.optimizer.bool_boxes: - self.make_equal_to(op.result, self.getvalue(op.args[0])) + if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes: + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) return - self._optimize_nullness(op, op.args[0], True) + self._optimize_nullness(op, op.getarg(0), True) def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.args[0], False) + self._optimize_nullness(op, op.getarg(0), False) def _optimize_oois_ooisnot(self, op, expect_isnot): - value0 = self.getvalue(op.args[0]) - value1 = self.getvalue(op.args[1]) + value0 = self.getvalue(op.getarg(0)) + value1 = self.getvalue(op.getarg(1)) if value0.is_virtual(): if value1.is_virtual(): intres = (value0 is value1) ^ expect_isnot @@ -285,9 +289,9 @@ elif value1.is_virtual(): self.make_constant_int(op.result, expect_isnot) elif value1.is_null(): - self._optimize_nullness(op, op.args[0], expect_isnot) + self._optimize_nullness(op, op.getarg(0), expect_isnot) elif value0.is_null(): - self._optimize_nullness(op, op.args[1], expect_isnot) + self._optimize_nullness(op, op.getarg(1), expect_isnot) elif value0 is value1: self.make_constant_int(op.result, not expect_isnot) else: @@ -307,17 +311,17 @@ def optimize_PTR_EQ(self, op): self._optimize_oois_ooisnot(op, False) - def optimize_INSTANCEOF(self, op): - value = self.getvalue(op.args[0]) - realclassbox = value.get_constant_class(self.optimizer.cpu) - if realclassbox is not None: - checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) - result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, - realclassbox, - checkclassbox) - self.make_constant_int(op.result, result) - return - self.emit_operation(op) +## def optimize_INSTANCEOF(self, op): +## value = self.getvalue(op.args[0]) +## realclassbox = value.get_constant_class(self.optimizer.cpu) +## if realclassbox is not None: +## checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) +## result = 
self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, +## realclassbox, +## checkclassbox) +## self.make_constant_int(op.result, result) +## return +## self.emit_operation(op) optimize_ops = _findall(OptRewrite, 'optimize_') Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 30 00:16:20 2010 @@ -188,12 +188,12 @@ itemboxes.append(itemvalue.get_key_box()) modifier.register_virtual_fields(self.keybox, itemboxes) for itemvalue in self._items: - if itemvalue is not self.constvalue: - itemvalue.get_args_for_fail(modifier) + itemvalue.get_args_for_fail(modifier) def _make_virtual(self, modifier): return modifier.make_varray(self.arraydescr) + class __extend__(SpecNode): def setup_virtual_node(self, optimizer, box, newinputargs): raise NotImplementedError @@ -258,7 +258,7 @@ def setup(self, virtuals): if not virtuals: return - + inputargs = self.optimizer.loop.inputargs specnodes = self.optimizer.loop.token.specnodes assert len(inputargs) == len(specnodes) @@ -285,18 +285,18 @@ def optimize_JUMP(self, op): orgop = self.optimizer.loop.operations[-1] exitargs = [] - target_loop_token = orgop.descr + target_loop_token = orgop.getdescr() assert isinstance(target_loop_token, LoopToken) specnodes = target_loop_token.specnodes - assert len(op.args) == len(specnodes) + assert op.numargs() == len(specnodes) for i in range(len(specnodes)): - value = self.getvalue(op.args[i]) + value = self.getvalue(op.getarg(i)) specnodes[i].teardown_virtual_node(self, value, exitargs) - op.args = exitargs[:] + op = op.copy_and_change(op.getopnum(), args=exitargs[:]) self.emit_operation(op) def optimize_VIRTUAL_REF(self, op): - indexbox = op.args[1] + indexbox = op.getarg(1) # # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -322,17 +322,16 @@ # typically a PyPy PyFrame, and now is the end of its execution, so # forcing it now does not have catastrophic effects. vrefinfo = self.optimizer.metainterp_sd.virtualref_info - # op.args[1] should really never point to null here + # op.getarg(1) should really never point to null here # - set 'forced' to point to the real object - op1 = ResOperation(rop.SETFIELD_GC, op.args, None, - descr = vrefinfo.descr_forced) - self.optimize_SETFIELD_GC(op1) + seo = self.optimizer.send_extra_operation + seo(ResOperation(rop.SETFIELD_GC, op.getarglist(), None, + descr = vrefinfo.descr_forced)) # - set 'virtual_token' to TOKEN_NONE - args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)] - op1 = ResOperation(rop.SETFIELD_GC, args, None, - descr = vrefinfo.descr_virtual_token) - self.optimize_SETFIELD_GC(op1) - # Note that in some cases the virtual in op.args[1] has been forced + args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)] + seo(ResOperation(rop.SETFIELD_GC, args, None, + descr = vrefinfo.descr_virtual_token)) + # Note that in some cases the virtual in op.getarg(1) has been forced # already. This is fine. In that case, and *if* a residual # CALL_MAY_FORCE suddenly turns out to access it, then it will # trigger a ResumeGuardForcedDescr.handle_async_forcing() which @@ -340,11 +339,11 @@ # was already forced). 
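# -- editorial sketch, not part of the patch --------------------------------
# Why optimize_VIRTUAL_REF_FINISH below now goes through
# send_extra_operation instead of calling self.optimize_SETFIELD_GC(op1)
# directly: the synthesized SETFIELD_GC should be seen by the *whole*
# optimization pipeline (rewrite, intbounds, virtualize, heap), not just
# this pass.  A simplified model of the chain, not the real classes:

class Optimization(object):
    next = None
    def propagate_forward(self, op):
        # a real pass would inspect or rewrite op here, then forward it
        self.emit_operation(op)
    def emit_operation(self, op):
        if self.next is not None:
            self.next.propagate_forward(op)

class Optimizer(object):
    def __init__(self, passes):
        # link the stages: each one forwards to the following stage
        for i in range(len(passes) - 1):
            passes[i].next = passes[i + 1]
        self.first_optimization = passes[0]
    def send_extra_operation(self, op):
        # a newly synthesized operation re-enters at the head of the chain,
        # so every pass gets a chance to handle it
        self.first_optimization.propagate_forward(op)

chain = Optimizer([Optimization(), Optimization()])
chain.send_extra_operation('setfield_gc(...)')   # toy op: just a string here
# ----------------------------------------------------------------------------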
def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): # optimizefindnode should ensure that fieldvalue is found assert isinstance(value, AbstractVirtualValue) - fieldvalue = value.getfield(op.descr, None) + fieldvalue = value.getfield(op.getdescr(), None) assert fieldvalue is not None self.make_equal_to(op.result, fieldvalue) else: @@ -357,36 +356,36 @@ optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC def optimize_SETFIELD_GC(self, op): - value = self.getvalue(op.args[0]) - fieldvalue = self.getvalue(op.args[1]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - value.setfield(op.descr, fieldvalue) + fieldvalue = self.getvalue(op.getarg(1)) + value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue) self.emit_operation(op) def optimize_NEW_WITH_VTABLE(self, op): - self.make_virtual(op.args[0], op.result, op) + self.make_virtual(op.getarg(0), op.result, op) def optimize_NEW(self, op): - self.make_vstruct(op.descr, op.result, op) + self.make_vstruct(op.getdescr(), op.result, op) def optimize_NEW_ARRAY(self, op): - sizebox = self.get_constant_box(op.args[0]) + sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument - if not isinstance(op.args[0], ConstInt): + if not isinstance(op.getarg(0), ConstInt): op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.descr) - self.make_varray(op.descr, sizebox.getint(), op.result, op) + descr=op.getdescr()) + self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: - ###self.optimize_default(op) + self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): self.make_constant_int(op.result, value.getlength()) else: @@ -395,9 +394,9 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: itemvalue = value.getitem(indexbox.getint()) self.make_equal_to(op.result, itemvalue) @@ -411,41 +410,18 @@ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC def optimize_SETARRAYITEM_GC(self, op): - value = self.getvalue(op.args[0]) + value = self.getvalue(op.getarg(0)) if value.is_virtual(): - indexbox = self.get_constant_box(op.args[1]) + indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.args[2])) + value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) return value.ensure_nonnull() ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue) self.emit_operation(op) - def optimize_ARRAYCOPY(self, op): - source_value = self.getvalue(op.args[2]) - dest_value = self.getvalue(op.args[3]) - source_start_box = self.get_constant_box(op.args[4]) - dest_start_box = self.get_constant_box(op.args[5]) - length = self.get_constant_box(op.args[6]) - if (source_value.is_virtual() and source_start_box and dest_start_box - and length and dest_value.is_virtual()): - # XXX optimize the case where dest value is not virtual, - # but we still can avoid a mess - source_start = source_start_box.getint() - dest_start = 
dest_start_box.getint() - for index in range(length.getint()): - val = source_value.getitem(index + source_start) - dest_value.setitem(index + dest_start, val) - return - if length and length.getint() == 0: - return # 0-length arraycopy - descr = op.args[0] - assert isinstance(descr, AbstractDescr) - self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result, - descr)) - def propagate_forward(self, op): - opnum = op.opnum + opnum = op.getopnum() for value, func in optimize_ops: if opnum == value: func(self, op) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py Thu Sep 30 00:16:20 2010 @@ -14,6 +14,11 @@ def _findall(Class, name_prefix): result = [] + for name in dir(Class): + if name.startswith(name_prefix): + opname = name[len(name_prefix):] + if opname.isupper(): + assert hasattr(resoperation.rop, opname) for value, name in resoperation.opname.items(): if hasattr(Class, name_prefix + name): result.append((value, getattr(Class, name_prefix + name))) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py Thu Sep 30 00:16:20 2010 @@ -159,7 +159,7 @@ if got_type == history.INT: self.registers_i[target_index] = resultbox elif got_type == history.REF: - #debug_print(' ->', + #debug_print(' ->', # llmemory.cast_ptr_to_adr(resultbox.getref_base())) self.registers_r[target_index] = resultbox elif got_type == history.FLOAT: @@ -421,14 +421,6 @@ def opimpl_arraylen_gc(self, arraybox, arraydescr): return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox) - @arguments("descr", "box", "box", "box", "box", "box", "box", "descr") - def opimpl_arraycopy(self, calldescr, fnptr, sourcebox, destbox, - source_startbox, dest_startbox, lengthbox, - arraydescr): - self.execute_with_descr(rop.ARRAYCOPY, arraydescr, calldescr, fnptr, - sourcebox, destbox, source_startbox, - dest_startbox, lengthbox) - @arguments("orgpc", "box", "descr", "box") def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox): negbox = self.metainterp.execute_and_record( @@ -446,7 +438,7 @@ def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) - self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, + self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, sbox, sizebox) abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, sizebox) @@ -1004,7 +996,7 @@ resumedescr = compile.ResumeGuardDescr(metainterp_sd, original_greenkey) guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) + descr=resumedescr) virtualizable_boxes = None if metainterp.jitdriver_sd.virtualizable_info is not None: virtualizable_boxes = metainterp.virtualizable_boxes @@ -1463,7 +1455,7 @@ resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes) return resbox - def _record_helper_pure(self, opnum, resbox, descr, *argboxes): + def _record_helper_pure(self, opnum, resbox, descr, *argboxes): canfold = self._all_constants(*argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1472,7 +1464,7 @@ 
resbox = resbox.nonconstbox() # ensure it is a Box return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes)) - def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): + def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): canfold = self._all_constants_varargs(argboxes) if canfold: resbox = resbox.constbox() # ensure it is a Const @@ -1485,7 +1477,7 @@ assert resbox is None or isinstance(resbox, Box) # record the operation profiler = self.staticdata.profiler - profiler.count_ops(opnum, RECORDED_OPS) + profiler.count_ops(opnum, RECORDED_OPS) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox @@ -1667,7 +1659,7 @@ # Search in current_merge_points for original_boxes with compatible # green keys, representing the beginning of the same loop as the one - # we end now. + # we end now. num_green_args = self.jitdriver_sd.num_green_args for j in range(len(self.current_merge_points)-1, -1, -1): @@ -1922,7 +1914,7 @@ vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE call_may_force_op = self.history.operations.pop() - assert call_may_force_op.opnum == rop.CALL_MAY_FORCE + assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) self.history.operations.append(call_may_force_op) @@ -2088,10 +2080,10 @@ """ Patch a CALL into a CALL_PURE. """ op = self.history.operations[-1] - assert op.opnum == rop.CALL + assert op.getopnum() == rop.CALL resbox_as_const = resbox.constbox() - for arg in op.args: - if not isinstance(arg, Const): + for i in range(op.numargs()): + if not isinstance(op.getarg(i), Const): break else: # all-constants: remove the CALL operation now and propagate a @@ -2100,8 +2092,8 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - op.opnum = rop.CALL_PURE - op.args = [resbox_as_const] + op.args + newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist()) + self.history.operations[-1] = newop return resbox def direct_assembler_call(self, targetjitdriver_sd): @@ -2109,10 +2101,11 @@ patching the CALL_MAY_FORCE that occurred just now. 
""" op = self.history.operations.pop() - assert op.opnum == rop.CALL_MAY_FORCE + assert op.getopnum() == rop.CALL_MAY_FORCE num_green_args = targetjitdriver_sd.num_green_args - greenargs = op.args[1:num_green_args+1] - args = op.args[num_green_args+1:] + arglist = op.getarglist() + greenargs = arglist[1:num_green_args+1] + args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args vinfo = targetjitdriver_sd.virtualizable_info if vinfo is not None: @@ -2122,9 +2115,7 @@ # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs, args) - op.opnum = rop.CALL_ASSEMBLER - op.args = args - op.descr = token + op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py Thu Sep 30 00:16:20 2010 @@ -1,42 +1,90 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import make_sure_not_resized -class ResOperation(object): - """The central ResOperation class, representing one operation.""" +def ResOperation(opnum, args, result, descr=None): + cls = opclasses[opnum] + op = cls(result) + op.initarglist(args) + if descr is not None: + assert isinstance(op, ResOpWithDescr) + op.setdescr(descr) + return op + - # for 'guard_*' - fail_args = None +class AbstractResOp(object): + """The central ResOperation class, representing one operation.""" # debug name = "" pc = 0 - def __init__(self, opnum, args, result, descr=None): - make_sure_not_resized(args) - assert isinstance(opnum, int) - self.opnum = opnum - self.args = list(args) - make_sure_not_resized(self.args) - assert not isinstance(result, list) + def __init__(self, result): self.result = result - self.setdescr(descr) + + # methods implemented by each concrete class + # ------------------------------------------ + + def getopnum(self): + raise NotImplementedError + + # methods implemented by the arity mixins + # --------------------------------------- + + def initarglist(self, args): + "This is supposed to be called only just after the ResOp has been created" + raise NotImplementedError + + def getarglist(self): + raise NotImplementedError + + def getarg(self, i): + raise NotImplementedError + + def setarg(self, i, box): + raise NotImplementedError + + def numargs(self): + raise NotImplementedError + + + # methods implemented by GuardResOp + # --------------------------------- + + def getfailargs(self): + return None + + def setfailargs(self, fail_args): + raise NotImplementedError + + # methods implemented by ResOpWithDescr + # ------------------------------------- + + def getdescr(self): + return None def setdescr(self, descr): - # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt - # instance provided by the backend holding details about the type - # of the operation. It must inherit from AbstractDescr. The - # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), - # cpu.calldescrof(), and cpu.typedescrof(). 
- from pypy.jit.metainterp.history import check_descr - check_descr(descr) - self.descr = descr + raise NotImplementedError + + # common methods + # -------------- + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + "shallow copy: the returned operation is meant to be used in place of self" + if args is None: + args = self.getarglist() + if result is None: + result = self.result + if descr is None: + descr = self.getdescr() + newop = ResOperation(opnum, args, result, descr) + return newop def clone(self): - descr = self.descr + args = self.getarglist() + descr = self.getdescr() if descr is not None: descr = descr.clone_if_mutable() - op = ResOperation(self.opnum, self.args, self.result, descr) - op.fail_args = self.fail_args + op = ResOperation(self.getopnum(), args, self.result, descr) if not we_are_translated(): op.name = self.name op.pc = self.pc @@ -55,82 +103,271 @@ prefix = "%s:%s " % (self.name, self.pc) else: prefix = "" - if self.descr is None or we_are_translated(): + args = self.getarglist() + descr = self.getdescr() + if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args])) + ', '.join([str(a) for a in args])) else: return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in self.args]), self.descr) + ', '.join([str(a) for a in args]), descr) def getopname(self): try: - return opname[self.opnum].lower() + return opname[self.getopnum()].lower() except KeyError: - return '<%d>' % self.opnum + return '<%d>' % self.getopnum() def is_guard(self): - return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST + return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST + return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST def is_guard_exception(self): - return (self.opnum == rop.GUARD_EXCEPTION or - self.opnum == rop.GUARD_NO_EXCEPTION) + return (self.getopnum() == rop.GUARD_EXCEPTION or + self.getopnum() == rop.GUARD_NO_EXCEPTION) def is_guard_overflow(self): - return (self.opnum == rop.GUARD_OVERFLOW or - self.opnum == rop.GUARD_NO_OVERFLOW) + return (self.getopnum() == rop.GUARD_OVERFLOW or + self.getopnum() == rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST + return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST + return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST def can_raise(self): - return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST + return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST def is_ovf(self): - return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST + return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST def is_comparison(self): return self.is_always_pure() and self.returns_bool_result() def is_final(self): - return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST + return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST def returns_bool_result(self): - opnum = self.opnum + opnum = self.getopnum() if we_are_translated(): assert opnum >= 0 elif opnum < 0: return False # for tests return opboolresult[opnum] + +# =================== +# Top of the hierachy +# =================== + +class PlainResOp(AbstractResOp): + pass + +class 
ResOpWithDescr(AbstractResOp): + + _descr = None + + def getdescr(self): + return self._descr + + def setdescr(self, descr): + # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt + # instance provided by the backend holding details about the type + # of the operation. It must inherit from AbstractDescr. The + # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(), + # cpu.calldescrof(), and cpu.typedescrof(). + from pypy.jit.metainterp.history import check_descr + check_descr(descr) + self._descr = descr + +class GuardResOp(ResOpWithDescr): + + _fail_args = None + + def getfailargs(self): + return self._fail_args + + def setfailargs(self, fail_args): + self._fail_args = fail_args + + def copy_and_change(self, opnum, args=None, result=None, descr=None): + newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) + newop.setfailargs(self.getfailargs()) + return newop + + def clone(self): + newop = AbstractResOp.clone(self) + newop.setfailargs(self.getfailargs()) + return newop + + +# ============ +# arity mixins +# ============ + +class NullaryOp(object): + _mixin_ = True + + def initarglist(self, args): + assert len(args) == 0 + + def getarglist(self): + return [] + + def numargs(self): + return 0 + + def getarg(self, i): + raise IndexError + + def setarg(self, i, box): + raise IndexError + + +class UnaryOp(object): + _mixin_ = True + _arg0 = None + + def initarglist(self, args): + assert len(args) == 1 + self._arg0, = args + + def getarglist(self): + return [self._arg0] + + def numargs(self): + return 1 + + def getarg(self, i): + if i == 0: + return self._arg0 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + else: + raise IndexError + + +class BinaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + + def initarglist(self, args): + assert len(args) == 2 + self._arg0, self._arg1 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 2 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + else: + raise IndexError + + def getarglist(self): + return [self._arg0, self._arg1] + + +class TernaryOp(object): + _mixin_ = True + _arg0 = None + _arg1 = None + _arg2 = None + + def initarglist(self, args): + assert len(args) == 3 + self._arg0, self._arg1, self._arg2 = args + + def getarglist(self): + return [self._arg0, self._arg1, self._arg2] + + def numargs(self): + return 3 + + def getarg(self, i): + if i == 0: + return self._arg0 + elif i == 1: + return self._arg1 + elif i == 2: + return self._arg2 + else: + raise IndexError + + def setarg(self, i, box): + if i == 0: + self._arg0 = box + elif i == 1: + self._arg1 = box + elif i == 2: + self._arg2 = box + else: + raise IndexError + +class N_aryOp(object): + _mixin_ = True + _args = None + + def initarglist(self, args): + self._args = args + + def getarglist(self): + return self._args + + def numargs(self): + return len(self._args) + + def getarg(self, i): + return self._args[i] + + def setarg(self, i, box): + self._args[i] = box + + # ____________________________________________________________ _oplist = [ '_FINAL_FIRST', - 'JUMP', - 'FINISH', + 'JUMP/*d', + 'FINISH/*d', '_FINAL_LAST', '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', - 'GUARD_TRUE', - 'GUARD_FALSE', - 'GUARD_VALUE', - 'GUARD_CLASS', - 'GUARD_NONNULL', - 'GUARD_ISNULL', - 
'GUARD_NONNULL_CLASS', + 'GUARD_TRUE/1d', + 'GUARD_FALSE/1d', + 'GUARD_VALUE/2d', + 'GUARD_CLASS/2d', + 'GUARD_NONNULL/1d', + 'GUARD_ISNULL/1d', + 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION', - 'GUARD_EXCEPTION', - 'GUARD_NO_OVERFLOW', - 'GUARD_OVERFLOW', - 'GUARD_NOT_FORCED', + 'GUARD_NO_EXCEPTION/0d', + 'GUARD_EXCEPTION/1d', + 'GUARD_NO_OVERFLOW/0d', + 'GUARD_OVERFLOW/0d', + 'GUARD_NOT_FORCED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -213,24 +450,25 @@ 'SETARRAYITEM_RAW/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', - 'ARRAYCOPY/7d', # removed before it's passed to the backend 'NEWSTR/1', 'STRSETITEM/3', 'UNICODESETITEM/3', 'NEWUNICODE/1', - #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier) + #'RUNTIMENEW/1', # ootype operation + 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'DEBUG_MERGE_POINT/1', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend + 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length + 'COPYUNICODECONTENT/5', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- - 'CALL', - 'CALL_ASSEMBLER', - 'CALL_MAY_FORCE', - 'CALL_LOOPINVARIANT', + 'CALL/*d', + 'CALL_ASSEMBLER/*d', + 'CALL_MAY_FORCE/*d', + 'CALL_LOOPINVARIANT/*d', #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation - 'CALL_PURE', # removed before it's passed to the backend + 'CALL_PURE/*d', # removed before it's passed to the backend # CALL_PURE(result, func, arg_1,..,arg_n) '_CANRAISE_LAST', # ----- end of can_raise operations ----- @@ -247,6 +485,7 @@ class rop(object): pass +opclasses = [] # mapping numbers to the concrete ResOp class opname = {} # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -261,16 +500,62 @@ name, arity = name.split('/') withdescr = 'd' in arity boolresult = 'b' in arity - arity = int(arity.rstrip('db')) + arity = arity.rstrip('db') + if arity == '*': + arity = -1 + else: + arity = int(arity) else: arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): opname[i] = name + cls = create_class_for_op(name, i, arity, withdescr) + else: + cls = None + opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) opboolresult.append(boolresult) - assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist) + +def get_base_class(mixin, base): + try: + return get_base_class.cache[(mixin, base)] + except KeyError: + arity_name = mixin.__name__[:-2] # remove the trailing "Op" + name = arity_name + base.__name__ # something like BinaryPlainResOp + bases = (mixin, base) + cls = type(name, bases, {}) + get_base_class.cache[(mixin, base)] = cls + return cls +get_base_class.cache = {} + +def create_class_for_op(name, opnum, arity, withdescr): + arity2mixin = { + 0: NullaryOp, + 1: UnaryOp, + 2: BinaryOp, + 3: TernaryOp + } + + is_guard = name.startswith('GUARD') + if is_guard: + assert withdescr + baseclass = GuardResOp + elif withdescr: + baseclass = ResOpWithDescr + else: + baseclass = PlainResOp + mixin = arity2mixin.get(arity, N_aryOp) + + def getopnum(self): + return opnum + + cls_name = '%s_OP' % name + bases = (get_base_class(mixin, baseclass),) + dic = {'getopnum': 
getopnum} + return type(cls_name, bases, dic) setup(__name__ == '__main__') # print out the table when run directly del _oplist Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resume.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/resume.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/resume.py Thu Sep 30 00:16:20 2010 @@ -4,10 +4,12 @@ from pypy.jit.metainterp.history import INT, REF, FLOAT, HOLE from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import jitprof -from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec +from pypy.jit.codewriter.effectinfo import funcptr_for_oopspec +from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr from pypy.rlib import rarithmetic from pypy.rlib.objectmodel import we_are_translated, specialize -from pypy.rlib.debug import have_debug_prints +from pypy.rlib.debug import have_debug_prints, ll_assert from pypy.rlib.debug import debug_start, debug_stop, debug_print # Logic to encode the chain of frames and the state of the boxes at a @@ -253,6 +255,15 @@ def make_varray(self, arraydescr): return VArrayInfo(arraydescr) + def make_vstrplain(self): + return VStrPlainInfo() + + def make_vstrconcat(self): + return VStrConcatInfo() + + def make_vstrslice(self): + return VStrSliceInfo() + def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) self.liveboxes[virtualbox] = tagged @@ -397,9 +408,7 @@ class AbstractVirtualInfo(object): - #def allocate(self, metainterp): - # raise NotImplementedError - #def setfields(self, decoder, struct): + #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): return tagged_list_eq(self.fieldnums, fieldnums) @@ -419,6 +428,7 @@ for i in range(len(self.fielddescrs)): descr = self.fielddescrs[i] decoder.setfield(descr, struct, self.fieldnums[i]) + return struct def debug_prints(self): assert len(self.fielddescrs) == len(self.fieldnums) @@ -433,8 +443,10 @@ self.known_class = known_class @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_with_vtable(self.known_class) + def allocate(self, decoder, index): + struct = decoder.allocate_with_vtable(self.known_class) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvirtualinfo", self.known_class.repr_rpython()) @@ -446,8 +458,10 @@ self.typedescr = typedescr @specialize.argtype(1) - def allocate(self, decoder): - return decoder.allocate_struct(self.typedescr) + def allocate(self, decoder, index): + struct = decoder.allocate_struct(self.typedescr) + decoder.virtuals_cache[index] = struct + return self.setfields(decoder, struct) def debug_prints(self): debug_print("\tvstructinfo", self.typedescr.repr_rpython()) @@ -459,14 +473,11 @@ #self.fieldnums = ... @specialize.argtype(1) - def allocate(self, decoder): + def allocate(self, decoder, index): length = len(self.fieldnums) - return decoder.allocate_array(self.arraydescr, length) - - @specialize.argtype(1) - def setfields(self, decoder, array): arraydescr = self.arraydescr - length = len(self.fieldnums) + array = decoder.allocate_array(arraydescr, length) + decoder.virtuals_cache[index] = array # NB. 
the check for the kind of array elements is moved out of the loop if arraydescr.is_array_of_pointers(): for i in range(length): @@ -480,12 +491,65 @@ for i in range(length): decoder.setarrayitem_int(arraydescr, array, i, self.fieldnums[i]) + return array def debug_prints(self): debug_print("\tvarrayinfo", self.arraydescr) for i in self.fieldnums: debug_print("\t\t", str(untag(i))) + +class VStrPlainInfo(AbstractVirtualInfo): + """Stands for the string made out of the characters of all fieldnums.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + length = len(self.fieldnums) + string = decoder.allocate_string(length) + decoder.virtuals_cache[index] = string + for i in range(length): + decoder.string_setitem(string, i, self.fieldnums[i]) + return string + + def debug_prints(self): + debug_print("\tvstrplaininfo length", len(self.fieldnums)) + + +class VStrConcatInfo(AbstractVirtualInfo): + """Stands for the string made out of the concatenation of two + other strings.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + # xxx for blackhole resuming, this will build all intermediate + # strings and throw them away immediately, which is a bit sub- + # efficient. Not sure we care. + left, right = self.fieldnums + string = decoder.concat_strings(left, right) + decoder.virtuals_cache[index] = string + return string + + def debug_prints(self): + debug_print("\tvstrconcatinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + + +class VStrSliceInfo(AbstractVirtualInfo): + """Stands for the string made out of slicing another string.""" + + @specialize.argtype(1) + def allocate(self, decoder, index): + largerstr, start, length = self.fieldnums + string = decoder.slice_string(largerstr, start, length) + decoder.virtuals_cache[index] = string + return string + + def debug_prints(self): + debug_print("\tvstrsliceinfo") + for i in self.fieldnums: + debug_print("\t\t", str(untag(i))) + # ____________________________________________________________ class AbstractResumeDataReader(object): @@ -496,7 +560,8 @@ blackholing and want the best performance. """ _mixin_ = True - virtuals = None + rd_virtuals = None + virtuals_cache = None virtual_default = None def _init(self, cpu, storage): @@ -508,17 +573,29 @@ self._prepare_virtuals(storage.rd_virtuals) self._prepare_pendingfields(storage.rd_pendingfields) + def getvirtual(self, index): + # Returns the index'th virtual, building it lazily if needed. + # Note that this may be called recursively; that's why the + # allocate() methods must fill in the cache as soon as they + # have the object, before they fill its fields. 
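# To make the comment above concrete (an illustration, not part of the
# patch): suppose virtual #0 is a struct with a field that points back to
# virtual #0 itself.  Decoding that field calls getvirtual(0) again;
# because allocate() stored the freshly created object in virtuals_cache[0]
# *before* filling the fields, the recursive call returns the partially
# built object instead of recursing forever.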
+ v = self.virtuals_cache[index] + if not v: + v = self.rd_virtuals[index].allocate(self, index) + ll_assert(v == self.virtuals_cache[index], "resume.py: bad cache") + return v + + def force_all_virtuals(self): + rd_virtuals = self.rd_virtuals + if rd_virtuals: + for i in range(len(rd_virtuals)): + if rd_virtuals[i] is not None: + self.getvirtual(i) + return self.virtuals_cache + def _prepare_virtuals(self, virtuals): if virtuals: - self.virtuals = [self.virtual_default] * len(virtuals) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - self.virtuals[i] = vinfo.allocate(self) - for i in range(len(virtuals)): - vinfo = virtuals[i] - if vinfo is not None: - vinfo.setfields(self, self.virtuals[i]) + self.rd_virtuals = virtuals + self.virtuals_cache = [self.virtual_default] * len(virtuals) def _prepare_pendingfields(self, pendingfields): if pendingfields is not None: @@ -622,6 +699,32 @@ return self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, ConstInt(length)) + def allocate_string(self, length): + return self.metainterp.execute_and_record(rop.NEWSTR, + None, ConstInt(length)) + + def string_setitem(self, strbox, index, charnum): + charbox = self.decode_box(charnum, INT) + self.metainterp.execute_and_record(rop.STRSETITEM, None, + strbox, ConstInt(index), charbox) + + def concat_strings(self, str1num, str2num): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT) + str1box = self.decode_box(str1num, REF) + str2box = self.decode_box(str2num, REF) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), str1box, str2box], calldescr) + + def slice_string(self, strnum, startnum, lengthnum): + calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_SLICE) + strbox = self.decode_box(strnum, REF) + startbox = self.decode_box(startnum, INT) + lengthbox = self.decode_box(lengthnum, INT) + stopbox = self.metainterp.execute_and_record(rop.INT_ADD, None, + startbox, lengthbox) + return self.metainterp.execute_and_record_varargs( + rop.CALL, [ConstInt(func), strbox, startbox, stopbox], calldescr) + def setfield(self, descr, structbox, fieldnum): if descr.is_pointer_field(): kind = REF @@ -663,9 +766,7 @@ else: box = self.consts[num] elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - box = virtuals[num] + box = self.getvirtual(num) elif tag == TAGINT: box = ConstInt(num) else: @@ -750,7 +851,7 @@ resumereader.handling_async_forcing() vrefinfo = metainterp_sd.virtualref_info resumereader.consume_vref_and_vable(vrefinfo, vinfo) - return resumereader.virtuals + return resumereader.force_all_virtuals() class ResumeDataDirectReader(AbstractResumeDataReader): unique_id = lambda: None @@ -768,7 +869,9 @@ # special case for resuming after a GUARD_NOT_FORCED: we already # have the virtuals self.resume_after_guard_not_forced = 2 - self.virtuals = all_virtuals + self.virtuals_cache = all_virtuals + # self.rd_virtuals can remain None, because virtuals_cache is + # already filled def handling_async_forcing(self): self.resume_after_guard_not_forced = 1 @@ -839,6 +942,31 @@ def allocate_array(self, arraydescr, length): return self.cpu.bh_new_array(arraydescr, length) + def allocate_string(self, length): + return self.cpu.bh_newstr(length) + + def string_setitem(self, str, index, charnum): + char = self.decode_int(charnum) + self.cpu.bh_strsetitem(str, index, char) + + def concat_strings(self, str1num, str2num): + str1 = self.decode_ref(str1num) + str2 = self.decode_ref(str2num) + str1 = 
lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str1) + str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str2) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_CONCAT) + result = funcptr(str1, str2) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + + def slice_string(self, strnum, startnum, lengthnum): + str = self.decode_ref(strnum) + start = self.decode_int(startnum) + length = self.decode_int(lengthnum) + str = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str) + funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_SLICE) + result = funcptr(str, start, start + length) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + def setfield(self, descr, struct, fieldnum): if descr.is_pointer_field(): newvalue = self.decode_ref(fieldnum) @@ -881,9 +1009,7 @@ return self.cpu.ts.NULLREF return self.consts[num].getref_base() elif tag == TAGVIRTUAL: - virtuals = self.virtuals - assert virtuals is not None - return virtuals[num] + return self.getvirtual(num) else: assert tag == TAGBOX if num < 0: Modified: pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py Thu Sep 30 00:16:20 2010 @@ -9,17 +9,14 @@ def transform(op): from pypy.jit.metainterp.history import AbstractDescr - # change ARRAYCOPY to call, so we don't have to pass around - # unnecessary information to the backend. Do the same with VIRTUAL_REF_*. - if op.opnum == rop.ARRAYCOPY: - descr = op.args[0] - assert isinstance(descr, AbstractDescr) - op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr) - elif op.opnum == rop.CALL_PURE: - op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr) - elif op.opnum == rop.VIRTUAL_REF: - op = ResOperation(rop.SAME_AS, [op.args[0]], op.result) - elif op.opnum == rop.VIRTUAL_REF_FINISH: + # Rename CALL_PURE to CALL. + # Simplify the VIRTUAL_REF_* so that they don't show up in the backend. 
+ if op.getopnum() == rop.CALL_PURE: + op = ResOperation(rop.CALL, op.getarglist()[1:], op.result, + op.getdescr()) + elif op.getopnum() == rop.VIRTUAL_REF: + op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) + elif op.getopnum() == rop.VIRTUAL_REF_FINISH: return [] return [op] @@ -36,7 +33,7 @@ newoperations = [] for op in loop.operations: if op.is_guard(): - descr = op.descr + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, memo) newboxes = modifier.finish(EMPTY_VALUES) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py Thu Sep 30 00:16:20 2010 @@ -5,28 +5,39 @@ from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\ ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\ - LoopToken -from pypy.jit.metainterp.resoperation import rop, ResOperation + LoopToken, get_const_ptr_for_string +from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp from pypy.jit.metainterp.typesystem import llhelper from pypy.jit.codewriter.heaptracker import adr2int from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype -from pypy.rpython.annlowlevel import llstr class ParseError(Exception): pass - class Boxes(object): pass +class ESCAPE_OP(N_aryOp, ResOpWithDescr): + + OPNUM = -123 + + def __init__(self, opnum, args, result, descr=None): + assert opnum == self.OPNUM + self.result = result + self.initarglist(args) + self.setdescr(descr) + + def getopnum(self): + return self.OPNUM + class ExtendedTreeLoop(TreeLoop): def getboxes(self): def opboxes(operations): for op in operations: yield op.result - for box in op.args: + for box in op.getarglist(): yield box def allboxes(): for box in self.inputargs: @@ -52,7 +63,8 @@ class OpParser(object): def __init__(self, input, cpu, namespace, type_system, boxkinds, - invent_fail_descr=default_fail_descr): + invent_fail_descr=default_fail_descr, + nonstrict=False): self.input = input self.vars = {} self.cpu = cpu @@ -64,6 +76,7 @@ else: self._cache = {} self.invent_fail_descr = invent_fail_descr + self.nonstrict = nonstrict self.looptoken = LoopToken() def get_const(self, name, typ): @@ -122,11 +135,14 @@ vars = [] for elem in elements: elem = elem.strip() - box = self.box_for_var(elem) - vars.append(box) - self.vars[elem] = box + vars.append(self.newvar(elem)) return vars + def newvar(self, elem): + box = self.box_for_var(elem) + self.vars[elem] = box + return box + def is_float(self, arg): try: float(arg) @@ -145,8 +161,7 @@ if arg.startswith('"') or arg.startswith("'"): # XXX ootype info = arg.strip("'\"") - return ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, - llstr(info))) + return get_const_ptr_for_string(info) if arg.startswith('ConstClass('): name = arg[len('ConstClass('):-1] return self.get_const(name, 'class') @@ -160,6 +175,8 @@ elif arg.startswith('ConstPtr('): name = arg[len('ConstPtr('):-1] return self.get_const(name, 'ptr') + if arg not in self.vars and self.nonstrict: + self.newvar(arg) return self.vars[arg] def parse_op(self, line): @@ -171,7 +188,7 @@ opnum = getattr(rop, opname.upper()) except AttributeError: if opname == 'escape': - opnum = -123 + opnum = ESCAPE_OP.OPNUM else: raise ParseError("unknown op: %s" % opname) endnum = 
line.rfind(')') @@ -184,7 +201,8 @@ if opname == 'debug_merge_point': allargs = [argspec] else: - allargs = argspec.split(",") + allargs = [arg for arg in argspec.split(",") + if arg != ''] poss_descr = allargs[-1].strip() if poss_descr.startswith('descr='): @@ -199,7 +217,7 @@ if rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST: i = line.find('[', endnum) + 1 j = line.find(']', i) - if i <= 0 or j <= 0: + if (i <= 0 or j <= 0) and not self.nonstrict: raise ParseError("missing fail_args for guard operation") fail_args = [] if i < j: @@ -228,6 +246,12 @@ descr = self.looptoken return opnum, args, descr, fail_args + def create_op(self, opnum, args, result, descr): + if opnum == ESCAPE_OP.OPNUM: + return ESCAPE_OP(opnum, args, result, descr) + else: + return ResOperation(opnum, args, result, descr) + def parse_result_op(self, line): res, op = line.split("=", 1) res = res.strip() @@ -237,14 +261,16 @@ raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) self.vars[res] = rvar - res = ResOperation(opnum, args, rvar, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, rvar, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_op_no_result(self, line): opnum, args, descr, fail_args = self.parse_op(line) - res = ResOperation(opnum, args, None, descr) - res.fail_args = fail_args + res = self.create_op(opnum, args, None, descr) + if fail_args is not None: + res.setfailargs(fail_args) return res def parse_next_op(self, line): @@ -257,11 +283,14 @@ lines = self.input.splitlines() ops = [] newlines = [] + first_comment = None for line in lines: # for simplicity comments are not allowed on # debug_merge_point lines if '#' in line and 'debug_merge_point(' not in line: if line.lstrip()[0] == '#': # comment only + if first_comment is None: + first_comment = line continue comm = line.rfind('#') rpar = line.find(')') # assume there's a op(...) 
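The hunks above give the oparser a nonstrict mode, exercised by the new tests further down and by the rewritten loopviewer.py: it tolerates traces dumped by a running pypy-c-jit that lack the [inputargs] header, omit fail_args on guards, and use variable names that were never declared. A hypothetical usage sketch follows; the trace text itself is invented, but the keyword arguments and attributes are the ones added in this patch.

from pypy.jit.metainterp.test.oparser import parse

# A fragment as it might come out of a PYPYLOG dump: a leading comment,
# no [inputargs] header, and variables used without prior declaration.
trace = '''
# bridge out of Guard12, 6 ops
i2 = int_add(i0, i1)
i3 = int_mul(i2, i0)
'''

loop = parse(trace, no_namespace=True, nonstrict=True)
assert loop.inputargs == []      # missing header is tolerated in nonstrict mode
assert loop.comment == '# bridge out of Guard12, 6 ops'
assert [op.getopname() for op in loop.operations] == ['int_add', 'int_mul']
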
@@ -270,12 +299,12 @@ if not line.strip(): continue # a comment or empty line newlines.append(line) - base_indent, inpargs = self.parse_inpargs(newlines[0]) - newlines = newlines[1:] + base_indent, inpargs, newlines = self.parse_inpargs(newlines) num, ops = self.parse_ops(base_indent, newlines, 0) if num < len(newlines): raise ParseError("unexpected dedent at line: %s" % newlines[num]) loop = ExtendedTreeLoop("loop") + loop.comment = first_comment loop.token = self.looptoken loop.operations = ops loop.inputargs = inpargs @@ -296,23 +325,27 @@ num += 1 return num, ops - def parse_inpargs(self, line): - base_indent = line.find('[') + def parse_inpargs(self, lines): + line = lines[0] + base_indent = len(line) - len(line.lstrip(' ')) line = line.strip() + if not line.startswith('[') and self.nonstrict: + return base_indent, [], lines + lines = lines[1:] if line == '[]': - return base_indent, [] - if base_indent == -1 or not line.endswith(']'): + return base_indent, [], lines + if not line.startswith('[') or not line.endswith(']'): raise ParseError("Wrong header: %s" % line) inpargs = self.parse_header_line(line[1:-1]) - return base_indent, inpargs + return base_indent, inpargs, lines def parse(input, cpu=None, namespace=None, type_system='lltype', boxkinds=None, invent_fail_descr=default_fail_descr, - no_namespace=False): + no_namespace=False, nonstrict=False): if namespace is None and not no_namespace: namespace = {} return OpParser(input, cpu, namespace, type_system, boxkinds, - invent_fail_descr).parse() + invent_fail_descr, nonstrict).parse() def pure_parse(*args, **kwds): kwds['invent_fail_descr'] = None Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py Thu Sep 30 00:16:20 2010 @@ -296,7 +296,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py Thu Sep 30 00:16:20 2010 @@ -100,8 +100,8 @@ debug_merge_point("info") ''' loop, oloop = self.reparse(inp, check_equal=False) - assert loop.operations[0].args[0]._get_str() == 'info' - assert oloop.operations[0].args[0]._get_str() == 'info' + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert oloop.operations[0].getarg(0)._get_str() == 'info' def test_floats(self): inp = ''' Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py Thu Sep 30 00:16:20 2010 @@ -178,7 +178,7 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.fail_args + liveboxes = op.getfailargs() assert len(liveboxes) == 2 # x, y (in some order) assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], 
history.BoxInt) Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py Thu Sep 30 00:16:20 2010 @@ -16,10 +16,10 @@ """ loop = parse(x) assert len(loop.operations) == 3 - assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, + assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 - assert loop.operations[-1].descr + assert loop.operations[-1].getdescr() def test_const_ptr_subops(): x = """ @@ -30,8 +30,8 @@ vtable = lltype.nullptr(S) loop = parse(x, None, locals()) assert len(loop.operations) == 1 - assert loop.operations[0].descr - assert loop.operations[0].fail_args == [] + assert loop.operations[0].getdescr() + assert loop.operations[0].getfailargs() == [] def test_descr(): class Xyz(AbstractDescr): @@ -43,7 +43,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_after_fail(): x = """ @@ -64,7 +64,7 @@ """ stuff = Xyz() loop = parse(x, None, locals()) - assert loop.operations[0].descr is stuff + assert loop.operations[0].getdescr() is stuff def test_boxname(): x = """ @@ -111,7 +111,7 @@ TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) loop = parse(x, None, {'func_ptr' : NULL}) - assert loop.operations[0].args[0].value == NULL + assert loop.operations[0].getarg(0).value == NULL def test_jump_target(): x = ''' @@ -119,7 +119,7 @@ jump() ''' loop = parse(x) - assert loop.operations[0].descr is loop.token + assert loop.operations[0].getdescr() is loop.token def test_jump_target_other(): looptoken = LoopToken() @@ -128,7 +128,7 @@ jump(descr=looptoken) ''' loop = parse(x, namespace=locals()) - assert loop.operations[0].descr is looptoken + assert loop.operations[0].getdescr() is looptoken def test_floats(): x = ''' @@ -136,7 +136,7 @@ f1 = float_add(f0, 3.5) ''' loop = parse(x) - assert isinstance(loop.operations[0].args[0], BoxFloat) + assert isinstance(loop.operations[0].getarg(0), BoxFloat) def test_debug_merge_point(): x = ''' @@ -147,10 +147,10 @@ debug_merge_point('(stuff) #1') ''' loop = parse(x) - assert loop.operations[0].args[0]._get_str() == 'info' - assert loop.operations[1].args[0]._get_str() == 'info' - assert loop.operations[2].args[0]._get_str() == " info" - assert loop.operations[3].args[0]._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(0)._get_str() == 'info' + assert loop.operations[1].getarg(0)._get_str() == 'info' + assert loop.operations[2].getarg(0)._get_str() == " info" + assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1" def test_descr_with_obj_print(): @@ -174,3 +174,32 @@ def test_parse_no_namespace(): loop = parse(example_loop_log, no_namespace=True) + +def test_attach_comment_to_loop(): + loop = parse(example_loop_log, no_namespace=True) + assert loop.comment == '# bridge out of Guard12, 6 ops' + +def test_parse_new_with_comma(): + # this is generated by PYPYJITLOG, check that we can handle it + x = ''' + [] + p0 = new(, descr=) + ''' + loop = parse(x) + assert loop.operations[0].getopname() == 'new' + +def test_no_fail_args(): + x = ''' + [i0] + guard_true(i0, descr=) + ''' + loop = parse(x, nonstrict=True) + assert loop.operations[0].getfailargs() 
== [] + +def test_no_inputargs(): + x = ''' + i2 = int_add(i0, i1) + ''' + loop = parse(x, nonstrict=True) + assert loop.inputargs == [] + assert loop.operations[0].getopname() == 'int_add' Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py Thu Sep 30 00:16:20 2010 @@ -1,6 +1,6 @@ import py, random -from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -115,6 +115,36 @@ mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([nextdescr], [], [], EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE)) + arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY)) + strconcatdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT)) + slicedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE)) + strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL)) + streq_slice_checknull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHECKNULL)) + streq_slice_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_NONNULL)) + streq_slice_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_SLICE_CHAR)) + streq_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL)) + streq_nonnull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_NONNULL_CHAR)) + streq_checknull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_CHECKNULL_CHAR)) + streq_lengthok_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], + oopspecindex=EffectInfo.OS_STREQ_LENGTHOK)) + class LoopToken(AbstractDescr): pass asmdescr = LoopToken() # it can be whatever, it's not a descr though Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 30 00:16:20 2010 @@ -42,7 +42,7 @@ opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), None) fdescr = ResumeGuardDescr(None, None) - op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr) + op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr) # setup rd data fi0 = resume.FrameInfo(None, "code0", 11) fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33) @@ -50,11 +50,11 @@ fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1]) # opt.store_final_boxes_in_guard(op) - if op.fail_args == [b0, b1]: + if op.getfailargs() == [b0, 
b1]: assert fdescr.rd_numb.nums == [tag(1, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)] else: - assert op.fail_args == [b1, b0] + assert op.getfailargs() == [b1, b0] assert fdescr.rd_numb.nums == [tag(0, TAGBOX)] assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)] assert fdescr.rd_virtuals is None @@ -140,24 +140,26 @@ print '%-39s| %s' % (txt1[:39], txt2[:39]) txt1 = txt1[39:] txt2 = txt2[39:] - assert op1.opnum == op2.opnum - assert len(op1.args) == len(op2.args) - for x, y in zip(op1.args, op2.args): + assert op1.getopnum() == op2.getopnum() + assert op1.numargs() == op2.numargs() + for i in range(op1.numargs()): + x = op1.getarg(i) + y = op2.getarg(i) assert x == remap.get(y, y) if op2.result in remap: assert op1.result == remap[op2.result] else: remap[op2.result] = op1.result - if op1.opnum != rop.JUMP: # xxx obscure - assert op1.descr == op2.descr - if op1.fail_args or op2.fail_args: - assert len(op1.fail_args) == len(op2.fail_args) + if op1.getopnum() != rop.JUMP: # xxx obscure + assert op1.getdescr() == op2.getdescr() + if op1.getfailargs() or op2.getfailargs(): + assert len(op1.getfailargs()) == len(op2.getfailargs()) if strict_fail_args: - for x, y in zip(op1.fail_args, op2.fail_args): + for x, y in zip(op1.getfailargs(), op2.getfailargs()): assert x == remap.get(y, y) else: - fail_args1 = set(op1.fail_args) - fail_args2 = set([remap.get(y, y) for y in op2.fail_args]) + fail_args1 = set(op1.getfailargs()) + fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()]) assert fail_args1 == fail_args2 assert len(oplist1) == len(oplist2) print '-'*57 @@ -209,7 +211,7 @@ self.metainterp_sd = metainterp_sd self.original_greenkey = original_greenkey def store_final_boxes(self, op, boxes): - op.fail_args = boxes + op.setfailargs(boxes) def __eq__(self, other): return type(self) is type(other) # xxx obscure @@ -2361,8 +2363,8 @@ from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader from pypy.jit.metainterp.test.test_resume import MyMetaInterp guard_op, = [op for op in self.loop.operations if op.is_guard()] - fail_args = guard_op.fail_args - fdescr = guard_op.descr + fail_args = guard_op.getfailargs() + fdescr = guard_op.getdescr() assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) @@ -3080,7 +3082,7 @@ setarrayitem_gc(p1, 1, 1, descr=arraydescr) p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p2, 1, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 1, descr=arraydescr) jump(i2) ''' @@ -3097,7 +3099,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 0, i0, descr=arraydescr) setarrayitem_gc(p2, 0, 3, descr=arraydescr) - arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr) + call(0, p1, p2, 1, 1, 2, descr=arraycopydescr) i2 = getarrayitem_gc(p2, 0, descr=arraydescr) jump(i2) ''' @@ -3114,7 +3116,7 @@ p2 = new_array(3, descr=arraydescr) setarrayitem_gc(p1, 2, 10, descr=arraydescr) setarrayitem_gc(p2, 2, 13, descr=arraydescr) - arraycopy(0, 0, p1, p2, 0, 0, 3, descr=arraydescr) + call(0, p1, p2, 0, 0, 3, descr=arraycopydescr) jump(p2) ''' expected = ''' @@ -3131,7 +3133,7 @@ ops = ''' [p1] p0 = new_array(0, descr=arraydescr) - arraycopy(0, 0, p0, p1, 0, 0, 0, descr=arraydescr) + call(0, p0, p1, 0, 0, 0, descr=arraycopydescr) jump(p1) ''' expected = ''' @@ -3891,7 +3893,606 @@ """ self.optimize_loop(ops, 'Not, Not', expected) + def test_newstr_1(self): + ops = """ + [i0] + p1 = newstr(1) + 
strsetitem(p1, 0, i0) + i1 = strgetitem(p1, 0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_newstr_2(self): + ops = """ + [i0, i1] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + i2 = strgetitem(p1, 1) + i3 = strgetitem(p1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + jump(i1, i0) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_concat_1(self): + ops = """ + [p1, p2] + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(p2, p3) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p3 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p3, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p3, 0, i4, i5) + jump(p2, p3) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_concat_vstr2_str(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p1, p2, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(2, i2) + p3 = newstr(i3) + strsetitem(p3, 0, i0) + strsetitem(p3, 1, i1) + i4 = strlen(p2) + i5 = int_add(2, i4) # will be killed by the backend + copystrcontent(p2, p3, 0, 2, i4) + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_vstr2(self): + ops = """ + [i0, i1, p2] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + p3 = call(0, p2, p1, descr=strconcatdescr) + jump(i1, i0, p3) + """ + expected = """ + [i0, i1, p2] + i2 = strlen(p2) + i3 = int_add(i2, 2) + p3 = newstr(i3) + i4 = strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, i0) + i5 = int_add(i4, 1) + strsetitem(p3, i5, i1) + i6 = int_add(i5, 1) # will be killed by the backend + jump(i1, i0, p3) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_str_str(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + p5 = call(0, p4, p3, descr=strconcatdescr) + jump(p2, p3, p5) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i12 = int_add(i1, i2) + i3 = strlen(p3) + i123 = int_add(i12, i3) + p5 = newstr(i123) + i1b = strlen(p1) + copystrcontent(p1, p5, 0, 0, i1b) + i2b = strlen(p2) + i12b = int_add(i1b, i2b) + copystrcontent(p2, p5, 0, i1b, i2b) + i3b = strlen(p3) + i123b = int_add(i12b, i3b) # will be killed by the backend + copystrcontent(p3, p5, 0, i12b, i3b) + jump(p2, p3, p5) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_concat_str_cstr1(self): + ops = """ + [p2] + p3 = call(0, p2, "x", descr=strconcatdescr) + jump(p3) + """ + expected = """ + [p2] + i2 = strlen(p2) + i3 = int_add(i2, 1) + p3 = newstr(i3) + i4 = strlen(p2) + copystrcontent(p2, p3, 0, 0, i4) + strsetitem(p3, i4, 120) # == ord('x') + i5 = int_add(i4, 1) # will be killed by the backend + jump(p3) + """ + self.optimize_loop(ops, 'Not', expected) + + def test_str_concat_consts(self): + ops = """ + [] + p1 = same_as("ab") + p2 = same_as("cde") + p3 = call(0, p1, p2, descr=strconcatdescr) + escape(p3) + jump() + """ + expected = """ + [] + escape("abcde") + jump() + """ + self.optimize_loop(ops, '', expected) + + def test_str_slice_1(self): + ops = """ + [p1, i1, i2] + p2 = call(0, p1, i1, i2, descr=slicedescr) + jump(p2, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + p2 = newstr(i3) + copystrcontent(p1, p2, i1, 
0, i3) + jump(p2, i1, i2) + """ + self.optimize_loop(ops, 'Not, Not, Not', expected) + + def test_str_slice_2(self): + ops = """ + [p1, i2] + p2 = call(0, p1, 0, i2, descr=slicedescr) + jump(p2, i2) + """ + expected = """ + [p1, i2] + p2 = newstr(i2) + copystrcontent(p1, p2, 0, 0, i2) + jump(p2, i2) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_slice_3(self): + ops = """ + [p1, i1, i2, i3, i4] + p2 = call(0, p1, i1, i2, descr=slicedescr) + p3 = call(0, p2, i3, i4, descr=slicedescr) + jump(p3, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] + i0 = int_sub(i2, i1) # killed by the backend + i5 = int_sub(i4, i3) + i6 = int_add(i1, i3) + p3 = newstr(i5) + copystrcontent(p1, p3, i6, 0, i5) + jump(p3, i1, i2, i3, i4) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected) + + def test_str_slice_getitem1(self): + ops = """ + [p1, i1, i2, i3] + p2 = call(0, p1, i1, i2, descr=slicedescr) + i4 = strgetitem(p2, i3) + escape(i4) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i6 = int_sub(i2, i1) # killed by the backend + i5 = int_add(i1, i3) + i4 = strgetitem(p1, i5) + escape(i4) + jump(p1, i1, i2, i3) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + + def test_str_slice_plain(self): + ops = """ + [i3, i4] + p1 = newstr(2) + strsetitem(p1, 0, i3) + strsetitem(p1, 1, i4) + p2 = call(0, p1, 1, 2, descr=slicedescr) + i5 = strgetitem(p2, 0) + escape(i5) + jump(i3, i4) + """ + expected = """ + [i3, i4] + escape(i4) + jump(i3, i4) + """ + self.optimize_loop(ops, 'Not, Not', expected) + + def test_str_slice_concat(self): + ops = """ + [p1, i1, i2, p2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + p4 = call(0, p3, p2, descr=strconcatdescr) + jump(p4, i1, i2, p2) + """ + expected = """ + [p1, i1, i2, p2] + i3 = int_sub(i2, i1) # length of p3 + i4 = strlen(p2) + i5 = int_add(i3, i4) + p4 = newstr(i5) + copystrcontent(p1, p4, i1, 0, i3) + i4b = strlen(p2) + i6 = int_add(i3, i4b) # killed by the backend + copystrcontent(p2, p4, 0, i3, i4b) + jump(p4, i1, i2, p2) + """ + self.optimize_loop(ops, 'Not, Not, Not, Not', expected) + + # ---------- + def optimize_loop_extradescrs(self, ops, spectext, optops): + from pypy.jit.metainterp.optimizeopt import string + def my_callinfo_for_oopspec(oopspecindex): + calldescrtype = type(LLtypeMixin.strequaldescr) + for value in LLtypeMixin.__dict__.values(): + if isinstance(value, calldescrtype): + if (value.get_extra_info() and + value.get_extra_info().oopspecindex == oopspecindex): + # returns 0 for 'func' in this test + return value, 0 + raise AssertionError("not found: oopspecindex=%d" % oopspecindex) + # + saved = string.callinfo_for_oopspec + try: + string.callinfo_for_oopspec = my_callinfo_for_oopspec + self.optimize_loop(ops, spectext, optops) + finally: + string.callinfo_for_oopspec = saved + + def test_str_equal_noop1(self): + ops = """ + [p1, p2] + i0 = call(0, p1, p2, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', ops) + + def test_str_equal_noop2(self): + ops = """ + [p1, p2, p3] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, p2, p3) + """ + expected = """ + [p1, p2, p3] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, p3, p4, descr=strequaldescr) + 
escape(i0) + jump(p1, p2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice1(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p4, p3, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i3 = int_sub(i2, i1) + i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice2(self): + ops = """ + [p1, i1, i2, p3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice3(self): + ops = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + p4 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] + guard_nonnull(p3) [] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr) + escape(i0) + jump(p1, i1, i2, p3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_slice4(self): + ops = """ + [p1, i1, i2] + p3 = call(0, p1, i1, i2, descr=slicedescr) + i0 = call(0, p3, "x", descr=strequaldescr) + escape(i0) + jump(p1, i1, i2) + """ + expected = """ + [p1, i1, i2] + i3 = int_sub(i2, i1) + i0 = call(0, p1, i1, i3, 120, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected) + + def test_str_equal_slice5(self): + ops = """ + [p1, i1, i2, i3] + p4 = call(0, p1, i1, i2, descr=slicedescr) + p5 = newstr(1) + strsetitem(p5, 0, i3) + i0 = call(0, p5, p4, descr=strequaldescr) + escape(i0) + jump(p1, i1, i2, i3) + """ + expected = """ + [p1, i1, i2, i3] + i4 = int_sub(i2, i1) + i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr) + escape(i0) + jump(p1, i1, i2, i3) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected) + + def test_str_equal_none1(self): + ops = """ + [p1] + i0 = call(0, p1, NULL, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_none2(self): + ops = """ + [p1] + i0 = call(0, NULL, p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = ptr_eq(p1, NULL) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull1(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "hello world", descr=streq_nonnull_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull2(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i1 = strlen(p1) + i0 = int_eq(i1, 0) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + 
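One change runs through every test file touched here: operations are no longer poked at through the attributes op.opnum, op.args, op.descr and op.fail_args, but through accessor methods. The class below is only an illustrative stand-in for the accessor interface these tests now rely on; the method names come from the patch, but the body is not the real resoperation.py implementation.

class FakeResOp(object):
    # Minimal stand-in exposing the accessor interface used by the tests.
    def __init__(self, opnum, args, result, descr=None):
        self._opnum = opnum
        self._args = list(args)
        self.result = result
        self._descr = descr
        self._fail_args = None

    def getopnum(self):
        return self._opnum
    def numargs(self):
        return len(self._args)
    def getarg(self, i):
        return self._args[i]
    def getarglist(self):
        return list(self._args)
    def getdescr(self):
        return self._descr
    def setdescr(self, descr):
        self._descr = descr
    def getfailargs(self):
        return self._fail_args
    def setfailargs(self, boxes):
        self._fail_args = boxes

Note that op.result is still read directly throughout the patch, so it stays a plain attribute in this sketch as well.
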
def test_str_equal_nonnull3(self): + ops = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + guard_nonnull(p1) [] + i0 = call(0, p1, 120, descr=streq_nonnull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_nonnull4(self): + ops = """ + [p1, p2] + p4 = call(0, p1, p2, descr=strconcatdescr) + i0 = call(0, "hello world", p4, descr=strequaldescr) + escape(i0) + jump(p1, p2) + """ + expected = """ + [p1, p2] + i1 = strlen(p1) + i2 = strlen(p2) + i3 = int_add(i1, i2) + p4 = newstr(i3) + i4 = strlen(p1) + copystrcontent(p1, p4, 0, 0, i4) + i5 = strlen(p2) + i6 = int_add(i4, i5) # will be killed by the backend + copystrcontent(p2, p4, 0, i4, i5) + i0 = call(0, "hello world", p4, descr=streq_nonnull_descr) + escape(i0) + jump(p1, p2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars0(self): + ops = """ + [i1] + p1 = newstr(0) + i0 = call(0, p1, "", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(1) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, p1, "x", descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + i0 = int_eq(i1, 120) # ord('x') + escape(i0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_chars2(self): + ops = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=strequaldescr) + escape(i0) + jump(i1, i2) + """ + expected = """ + [i1, i2] + p1 = newstr(2) + strsetitem(p1, 0, i1) + strsetitem(p1, 1, i2) + i0 = call(0, p1, "xy", descr=streq_lengthok_descr) + escape(i0) + jump(i1, i2) + """ + self.optimize_loop_extradescrs(ops, 'Not, Not', expected) + + def test_str_equal_chars3(self): + ops = """ + [p1] + i0 = call(0, "x", p1, descr=strequaldescr) + escape(i0) + jump(p1) + """ + expected = """ + [p1] + i0 = call(0, p1, 120, descr=streq_checknull_char_descr) + escape(i0) + jump(p1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + + def test_str_equal_lengthmismatch1(self): + ops = """ + [i1] + p1 = newstr(1) + strsetitem(p1, 0, i1) + i0 = call(0, "xy", p1, descr=strequaldescr) + escape(i0) + jump(i1) + """ + expected = """ + [i1] + escape(0) + jump(i1) + """ + self.optimize_loop_extradescrs(ops, 'Not', expected) + # XXX unicode operations + # XXX str2unicode ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin): Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py Thu Sep 30 00:16:20 2010 @@ -319,8 +319,8 @@ for loop in get_stats().loops: assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode for op in loop.operations: - if op.is_guard() and hasattr(op.descr, '_debug_suboperations'): - assert len(op.descr._debug_suboperations) <= length + 5 + if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'): + assert len(op.getdescr()._debug_suboperations) <= length + 5 def test_inline_trace_limit(self): myjitdriver = JitDriver(greens=[], reds=['n']) Modified: 
pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py Thu Sep 30 00:16:20 2010 @@ -199,10 +199,10 @@ def test_prepare_virtuals(): class FakeVinfo(object): - def allocate(self, decoder): - return "allocated" - def setfields(self, decoder, virtual): - assert virtual == "allocated" + def allocate(self, decoder, index): + s = "allocated" + decoder.virtuals_cache[index] = s + return s class FakeStorage(object): rd_virtuals = [FakeVinfo(), None] rd_numb = [] @@ -212,7 +212,97 @@ _already_allocated_resume_virtuals = None cpu = None reader = ResumeDataDirectReader(None, FakeStorage()) - assert reader.virtuals == ["allocated", reader.virtual_default] + assert reader.force_all_virtuals() == ["allocated", reader.virtual_default] + +# ____________________________________________________________ + +class FakeResumeDataReader(AbstractResumeDataReader): + def allocate_with_vtable(self, known_class): + return FakeBuiltObject(vtable=known_class) + def allocate_struct(self, typedescr): + return FakeBuiltObject(typedescr=typedescr) + def allocate_array(self, arraydescr, length): + return FakeBuiltObject(arraydescr=arraydescr, items=[None]*length) + def setfield(self, descr, struct, fieldnum): + setattr(struct, descr, fieldnum) + def setarrayitem_int(self, arraydescr, array, i, fieldnum): + assert 0 <= i < len(array.items) + assert arraydescr is array.arraydescr + array.items[i] = fieldnum + def allocate_string(self, length): + return FakeBuiltObject(string=[None]*length) + def string_setitem(self, string, i, fieldnum): + value, tag = untag(fieldnum) + assert tag == TAGINT + assert 0 <= i < len(string.string) + string.string[i] = value + def concat_strings(self, left, right): + return FakeBuiltObject(strconcat=[left, right]) + def slice_string(self, str, start, length): + return FakeBuiltObject(strslice=[str, start, length]) + +class FakeBuiltObject(object): + def __init__(self, **kwds): + self.__dict__ = kwds + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + def __repr__(self): + return 'FakeBuiltObject(%s)' % ( + ', '.join(['%s=%r' % item for item in self.__dict__.items()])) + +class FakeArrayDescr(object): + def is_array_of_pointers(self): return False + def is_array_of_floats(self): return False + +def test_virtualinfo(): + info = VirtualInfo(123, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(vtable=123, fielddescr1=tag(456, TAGINT))] + +def test_vstructinfo(): + info = VStructInfo(124, ["fielddescr1"]) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(typedescr=124, fielddescr1=tag(456, TAGINT))] + +def test_varrayinfo(): + arraydescr = FakeArrayDescr() + info = VArrayInfo(arraydescr) + info.fieldnums = [tag(456, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(arraydescr=arraydescr, items=[tag(456, TAGINT)])] + +def test_vstrplaininfo(): + info = VStrPlainInfo() + info.fieldnums = [tag(60, TAGINT)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + 
assert reader.force_all_virtuals() == [ + FakeBuiltObject(string=[60])] + +def test_vstrconcatinfo(): + info = VStrConcatInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strconcat=info.fieldnums)] + +def test_vstrsliceinfo(): + info = VStrSliceInfo() + info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX), tag(30, TAGBOX)] + reader = FakeResumeDataReader() + reader._prepare_virtuals([info]) + assert reader.force_all_virtuals() == [ + FakeBuiltObject(strslice=info.fieldnums)] # ____________________________________________________________ @@ -957,7 +1047,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 2 + assert len(reader.virtuals_cache) == 2 b2t = reader.decode_ref(modifier._gettagged(b2s)) b4t = reader.decode_ref(modifier._gettagged(b4s)) trace = metainterp.trace @@ -972,13 +1062,14 @@ b4set = [(rop.SETFIELD_GC, [b4t, b2t], None, LLtypeMixin.nextdescr), (rop.SETFIELD_GC, [b4t, b3t], None, LLtypeMixin.valuedescr), (rop.SETFIELD_GC, [b4t, b5t], None, LLtypeMixin.otherdescr)] - if untag(modifier._gettagged(b2s))[0] == -2: - expected = [b2new, b4new] + b2set + b4set - else: - expected = [b4new, b2new] + b4set + b2set - - for x, y in zip(expected, trace): - assert x == y + expected = [b2new, b4new] + b4set + b2set + + # check that we get the operations in 'expected', in a possibly different + # order. + assert len(trace) == len(expected) + for x in trace: + assert x in expected + expected.remove(x) ptr = b2t.value._obj.container._as_ptr() assert lltype.typeOf(ptr) == lltype.Ptr(LLtypeMixin.NODE) assert ptr.value == 111 @@ -1020,7 +1111,7 @@ # resume metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace expected = [ @@ -1065,7 +1156,7 @@ NULL = ConstPtr.value metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert len(reader.virtuals) == 1 + assert len(reader.virtuals_cache) == 1 b2t = reader.decode_ref(tag(0, TAGVIRTUAL)) trace = metainterp.trace @@ -1112,7 +1203,7 @@ metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) - assert reader.virtuals is None + assert reader.virtuals_cache is None trace = metainterp.trace b2set = (rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr) expected = [b2set] Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py Thu Sep 30 00:16:20 2010 @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin @@ -72,6 +72,234 @@ res = self.meta_interp(f, [6, 10]) assert res == 6 + def test_char2string_pure(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n']) + @dont_look_inside + def escape(x): + pass + def f(n): + while n > 0: + jitdriver.can_enter_jit(n=n) + 
jitdriver.jit_merge_point(n=n) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + if n > 100: + escape(s) + n -= 1 + return 42 + self.meta_interp(f, [6]) + self.check_loops(newstr=0, strsetitem=0, strlen=0, + newunicode=0, unicodesetitem=0, unicodelen=0) + + def test_char2string_escape(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['n', 'total']) + @dont_look_inside + def escape(x): + return ord(x[0]) + def f(n): + total = 0 + while n > 0: + jitdriver.can_enter_jit(n=n, total=total) + jitdriver.jit_merge_point(n=n, total=total) + s = dochr(n) + if not we_are_jitted(): + s += s # forces to be a string + total += escape(s) + n -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + + def test_char2string2char(self): + for dochr in [chr, ]: #unichr]: + jitdriver = JitDriver(greens = [], reds = ['m', 'total']) + def f(m): + total = 0 + while m > 0: + jitdriver.can_enter_jit(m=m, total=total) + jitdriver.jit_merge_point(m=m, total=total) + string = dochr(m) + if m > 100: + string += string # forces to be a string + # read back the character + c = string[0] + total += ord(c) + m -= 1 + return total + res = self.meta_interp(f, [6]) + assert res == 21 + self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + newunicode=0, unicodegetitem=0, unicodesetitem=0, + unicodelen=0) + + def test_strconcat_pure(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + if m > 100: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, strsetitem=0, + newunicode=0, unicodesetitem=0, + call=0, call_pure=0) + + def test_strconcat_escape_str_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, + call=1, call_pure=0) # escape + + def test_strconcat_escape_char_char(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + while m >= 0: + 
jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = chr(n) + chr(m) + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, + call=1, call_pure=0) # escape + + def test_strconcat_escape_str_char_str(self): + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = ["somestr"+str(i) for i in range(10)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + chr(n) + mylist[m] + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, + call=1, call_pure=0) # escape + + def test_strconcat_guard_fail(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + mylist = [somestr+str(i) for i in range(12)] + def f(n, m): + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = mylist[n] + mylist[m] + if m & 1: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [6, 10]) + + def test_strslice(self): + for somestr in ["abc", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = "foobarbazetc"[m:n] + if m <= 5: + escape(s) + m -= 1 + return 42 + self.meta_interp(f, [10, 10]) + + def test_streq_char(self): + for somestr in ["?abcdefg", ]: #u"def"]: + jitdriver = JitDriver(greens = [], reds = ['m', 'n']) + @dont_look_inside + def escape(x): + pass + def f(n, m): + assert n >= 0 + while m >= 0: + jitdriver.can_enter_jit(m=m, n=n) + jitdriver.jit_merge_point(m=m, n=n) + s = somestr[:m] + escape(s == "?") + m -= 1 + return 42 + self.meta_interp(f, [6, 7]) + self.check_loops(newstr=0, newunicode=0) + + class TestOOtype(StringTests, OOJitMixin): CALL = "oosend" CALL_PURE = "oosend_pure" Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py Thu Sep 30 00:16:20 2010 @@ -71,11 +71,11 @@ # ops = self.metainterp.staticdata.stats.loops[0].operations [guard_op] = [op for op in ops - if op.opnum == rop.GUARD_NOT_FORCED] - bxs1 = [box for box in guard_op.fail_args + if op.getopnum() == rop.GUARD_NOT_FORCED] + bxs1 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('.X')] assert len(bxs1) == 1 - bxs2 = [box for box in guard_op.fail_args + bxs2 = [box for box in guard_op.getfailargs() if str(box._getrepr_()).endswith('JitVirtualRef')] assert len(bxs2) == 1 JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF @@ -84,11 +84,11 @@ # try reloading from blackhole.py's point of view from pypy.jit.metainterp.resume import ResumeDataDirectReader cpu = self.metainterp.cpu - cpu.get_latest_value_count = lambda : len(guard_op.fail_args) - cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint() - cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base() + cpu.get_latest_value_count = lambda : len(guard_op.getfailargs()) + cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint() + cpu.get_latest_value_ref = lambda 
i:guard_op.getfailargs()[i].getref_base() cpu.clear_latest_values = lambda count: None - resumereader = ResumeDataDirectReader(cpu, guard_op.descr) + resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr()) vrefinfo = self.metainterp.staticdata.virtualref_info lst = [] vrefinfo.continue_tracing = lambda vref, virtual: \ @@ -100,7 +100,7 @@ lst[0][0]) # assert correct type # # try reloading from pyjitpl's point of view - self.metainterp.rebuild_state_after_failure(guard_op.descr) + self.metainterp.rebuild_state_after_failure(guard_op.getdescr()) assert len(self.metainterp.framestack) == 1 assert len(self.metainterp.virtualref_boxes) == 2 assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py Thu Sep 30 00:16:20 2010 @@ -21,6 +21,7 @@ # - full optimizer # - jitdriver hooks # - two JITs + # - string concatenation, slicing and comparison class Frame(object): _virtualizable2_ = ['i'] @@ -60,11 +61,15 @@ frame.i -= 1 return total * 10 # - myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x']) + myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x', 's']) def f2(g, m, x): + s = "" while m > 0: - myjitdriver2.can_enter_jit(g=g, m=m, x=x) - myjitdriver2.jit_merge_point(g=g, m=m, x=x) + myjitdriver2.can_enter_jit(g=g, m=m, x=x, s=s) + myjitdriver2.jit_merge_point(g=g, m=m, x=x, s=s) + s += 'xy' + if s[:2] == 'yz': + return -666 m -= 1 x += 3 return x Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py (original) +++ pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py Thu Sep 30 00:16:20 2010 @@ -7,7 +7,8 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.jit import PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL +from pypy.rlib.jit import (PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL, + OPTIMIZER_NO_PERFECTSPEC) from pypy.rlib.jit import DEBUG_PROFILE from pypy.rlib.jit import BaseJitCell from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -83,6 +84,9 @@ return history.ConstFloat(value) else: return history.BoxFloat(value) + elif isinstance(value, str) or isinstance(value, unicode): + assert len(value) == 1 # must be a character + value = ord(value) else: value = intmask(value) if in_const_box: @@ -187,6 +191,10 @@ from pypy.jit.metainterp import simple_optimize self.optimize_loop = simple_optimize.optimize_loop self.optimize_bridge = simple_optimize.optimize_bridge + elif optimizer == OPTIMIZER_NO_PERFECTSPEC: + from pypy.jit.metainterp import optimize_nopspec + self.optimize_loop = optimize_nopspec.optimize_loop + self.optimize_bridge = optimize_nopspec.optimize_bridge elif optimizer == OPTIMIZER_FULL: from pypy.jit.metainterp import optimize self.optimize_loop = optimize.optimize_loop Modified: pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py Thu Sep 30 00:16:20 2010 @@ -1,30 
+1,93 @@ #!/usr/bin/env python -""" Usage: loopviewer.py [loopnum] loopfile +""" +Parse and display the traces produced by pypy-c-jit when PYPYLOG is set. """ import autopath import py import sys +import optparse +from pprint import pprint from pypy.tool import logparser from pypy.jit.metainterp.test.oparser import parse from pypy.jit.metainterp.history import ConstInt from pypy.rpython.lltypesystem import llmemory, lltype -def main(loopnum, loopfile): +def main(loopfile, options): + print 'Loading file:' log = logparser.parse_log_file(loopfile) - loops = logparser.extract_category(log, "jit-log-opt-") - inp = loops[loopnum] - loop = parse(inp, no_namespace=True) - loop.show() + loops, summary = consider_category(log, options, "jit-log-opt-") + if not options.quiet: + for loop in loops: + loop.show() + + if options.summary: + print + print 'Summary:' + print_summary(summary) -if __name__ == '__main__': - if len(sys.argv) == 2: - loopnum = -1 - loopfile = sys.argv[1] - elif len(sys.argv) == 3: - loopnum = int(sys.argv[1]) - loopfile = sys.argv[2] + if options.diff: + # non-optimized loops and summary + nloops, nsummary = consider_category(log, options, "jit-log-noopt-") + print + print 'Summary of optimized-away operations' + print + diff = {} + keys = set(summary.keys()).union(set(nsummary)) + for key in keys: + before = nsummary.get(key, 0) + after = summary.get(key, 0) + diff[key] = (before-after, before, after) + print_diff(diff) + +def consider_category(log, options, category): + loops = logparser.extract_category(log, category) + if options.loopnum is None: + input_loops = loops else: - print __doc__ - sys.exit(1) - main(loopnum, loopfile) + input_loops = [loops[options.loopnum]] + loops = [parse(inp, no_namespace=True, nonstrict=True) + for inp in input_loops] + summary = {} + for loop in loops: + summary = loop.summary(summary) + return loops, summary + + +def print_summary(summary): + ops = [(summary[key], key) for key in summary] + ops.sort(reverse=True) + for n, key in ops: + print '%5d' % n, key + +def print_diff(diff): + ops = [(d, before, after, key) for key, (d, before, after) in diff.iteritems()] + ops.sort(reverse=True) + tot_before = 0 + tot_after = 0 + for d, before, after, key in ops: + tot_before += before + tot_after += after + print '%5d - %5d = %5d ' % (before, after, d), key + print '-' * 50 + print '%5d - %5d = %5d ' % (tot_before, tot_after, tot_before-tot_after), 'TOTAL' + +if __name__ == '__main__': + parser = optparse.OptionParser(usage="%prog loopfile [options]") + parser.add_option('-n', '--loopnum', dest='loopnum', default=-1, metavar='N', type=int, + help='show the loop number N [default: last]') + parser.add_option('-a', '--all', dest='loopnum', action='store_const', const=None, + help='show all loops in the file') + parser.add_option('-s', '--summary', dest='summary', action='store_true', default=False, + help='print a summary of the operations in the loop(s)') + parser.add_option('-d', '--diff', dest='diff', action='store_true', default=False, + help='print the difference between non-optimized and optimized operations in the loop(s)') + parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False, + help='do not show the graphical representation of the loop') + + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(2) + + main(args[0], options) Modified: pypy/branch/fast-forward/pypy/jit/tool/showstats.py ============================================================================== --- 
pypy/branch/fast-forward/pypy/jit/tool/showstats.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/showstats.py Thu Sep 30 00:16:20 2010 @@ -17,7 +17,7 @@ num_dmp = 0 num_guards = 0 for op in loop.operations: - if op.opnum == rop.DEBUG_MERGE_POINT: + if op.getopnum() == rop.DEBUG_MERGE_POINT: num_dmp += 1 else: num_ops += 1 Modified: pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py ============================================================================== --- pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py (original) +++ pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py Thu Sep 30 00:16:20 2010 @@ -253,9 +253,10 @@ def main(loopfile, use_threshold, view=True): countname = py.path.local(loopfile + '.count') if countname.check(): - counts = [re.split(r' +', line, 1) for line in countname.readlines()] - counts = Counts([(k.strip("\n"), int(v.strip('\n'))) - for v, k in counts]) + counts = [re.split('( 20 and use_threshold: counts.threshold = l[-20] Modified: pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c ============================================================================== --- pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c (original) +++ pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c Thu Sep 30 00:16:20 2010 @@ -1,3 +1,4 @@ +#include double sum(double *img); @@ -5,4 +6,4 @@ double *img=malloc(640*480*4*sizeof(double)); int sa=0; for (int l=0; l<500; l++) sum(img); -} +} Modified: pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py (original) +++ pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 30 00:16:20 2010 @@ -140,7 +140,7 @@ for op in loop.operations: if op.getopname() == "debug_merge_point": sliced_loop = BytecodeTrace() - sliced_loop.bytecode = op.args[0]._get_str().rsplit(" ", 1)[1] + sliced_loop.bytecode = op.getarg(0)._get_str().rsplit(" ", 1)[1] self.sliced_loops.append(sliced_loop) else: sliced_loop.append(op) @@ -798,7 +798,6 @@ if i > 750: a = b return sa ''', 215, ([], 12481752)) - assert False def test_array_sum(self): for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): @@ -869,6 +868,24 @@ return intimg[i - 1] ''', maxops, ([tc], res)) + def test_unpackiterable(self): + self.run_source(''' + from array import array + + def main(): + i = 0 + t = array('l', (1, 2)) + while i < 2000: + a, b = t + i += 1 + return 3 + + ''', 100, ([], 3)) + bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") + # we allocate virtual ref and frame, we don't want block + assert len(bytecode.get_opnames('call_may_force')) == 0 + + def test_intbound_simple(self): ops = ('<', '>', '<=', '>=', '==', '!=') nbr = (3, 7) Modified: pypy/branch/fast-forward/pypy/objspace/std/objspace.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/objspace.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/objspace.py Thu Sep 30 00:16:20 2010 @@ -7,7 +7,7 @@ from pypy.objspace.std import (builtinshortcut, stdtypedef, frame, model, transparent, callmethod, proxyobject) from pypy.objspace.descroperation import DescrOperation, raiseattrerror -from pypy.rlib.objectmodel import instantiate, r_dict +from pypy.rlib.objectmodel import instantiate, r_dict, specialize from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.rarithmetic import base_int from 
pypy.rlib.objectmodel import we_are_translated @@ -350,7 +350,8 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def fixedview(self, w_obj, expected_length=-1): + @specialize.arg(3) + def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ if isinstance(w_obj, W_TupleObject): @@ -358,18 +359,26 @@ elif isinstance(w_obj, W_ListObject): t = w_obj.wrappeditems[:] else: - return ObjSpace.fixedview(self, w_obj, expected_length) + if unroll: + return make_sure_not_resized(ObjSpace.unpackiterable_unroll( + self, w_obj, expected_length)[:]) + else: + return make_sure_not_resized(ObjSpace.unpackiterable( + self, w_obj, expected_length)[:]) if expected_length != -1 and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) return t + def fixedview_unroll(self, w_obj, expected_length=-1): + return self.fixedview(w_obj, expected_length, unroll=True) + def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] else: - return ObjSpace.listview(self, w_obj, expected_length) + return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) return t Modified: pypy/branch/fast-forward/pypy/objspace/std/stringtype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/stringtype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/stringtype.py Thu Sep 30 00:16:20 2010 @@ -4,6 +4,7 @@ from sys import maxint from pypy.rlib.objectmodel import specialize +from pypy.rlib.jit import we_are_jitted def wrapstr(space, s): from pypy.objspace.std.stringobject import W_StringObject @@ -32,7 +33,7 @@ def wrapchar(space, c): from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.ropeobject import rope, W_RopeObject - if space.config.objspace.std.withprebuiltchar: + if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): if space.config.objspace.std.withrope: return W_RopeObject.PREBUILT[ord(c)] return W_StringObject.PREBUILT[ord(c)] Modified: pypy/branch/fast-forward/pypy/rlib/jit.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/jit.py (original) +++ pypy/branch/fast-forward/pypy/rlib/jit.py Thu Sep 30 00:16:20 2010 @@ -224,7 +224,8 @@ """Inconsistency in the JIT hints.""" OPTIMIZER_SIMPLE = 0 -OPTIMIZER_FULL = 1 +OPTIMIZER_NO_PERFECTSPEC = 1 +OPTIMIZER_FULL = 2 DEBUG_OFF = 0 DEBUG_PROFILE = 1 Modified: pypy/branch/fast-forward/pypy/rlib/rmmap.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rmmap.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rmmap.py Thu Sep 30 00:16:20 2010 @@ -292,7 +292,8 @@ c_munmap(self.getptr(0), self.size) self.setdata(NODATA, 0) - __del__ = close + def __del__(self): + self.close() def unmapview(self): UnmapViewOfFile(self.getptr(0)) Modified: pypy/branch/fast-forward/pypy/rlib/rstring.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/rstring.py (original) +++ pypy/branch/fast-forward/pypy/rlib/rstring.py Thu Sep 30 00:16:20 2010 @@ -1,9 +1,10 @@ """ String builder interface and string functions """ -from pypy.rpython.extregistry import ExtRegistryEntry from 
pypy.annotation.model import SomeObject, SomeString, s_None,\ SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString +from pypy.rlib.rarithmetic import ovfcheck +from pypy.rpython.extregistry import ExtRegistryEntry # -------------- public API for string functions ----------------------- @@ -46,9 +47,7 @@ # -------------- public API --------------------------------- -# the following number is the maximum size of an RPython unicode -# string that goes into the nursery of the minimark GC. -INIT_SIZE = 56 +INIT_SIZE = 100 # XXX tweak class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): @@ -84,6 +83,11 @@ result = None factor = 1 assert mul > 0 + try: + ovfcheck(len(s) * mul) + except OverflowError: + raise MemoryError + limit = mul >> 1 while True: if mul & factor: Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py ============================================================================== --- pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py (original) +++ pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py Thu Sep 30 00:16:20 2010 @@ -1,5 +1,8 @@ +import sys + +from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit, \ + string_repeat -from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit def test_split(): assert split("", 'x') == [''] @@ -39,3 +42,6 @@ s.append_multiple_char('d', 4) assert s.build() == 'aabcbdddd' assert isinstance(s.build(), unicode) + +def test_string_repeat(): + raises(MemoryError, string_repeat, "abc", sys.maxint) Modified: pypy/branch/fast-forward/pypy/rpython/annlowlevel.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/annlowlevel.py (original) +++ pypy/branch/fast-forward/pypy/rpython/annlowlevel.py Thu Sep 30 00:16:20 2010 @@ -397,6 +397,8 @@ assert strtype in (str, unicode) def hlstr(ll_s): + if not ll_s: + return None if hasattr(ll_s, 'chars'): if strtype is str: return ''.join(ll_s.chars) @@ -423,9 +425,14 @@ def llstr(s): from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode + from pypy.rpython.lltypesystem.rstr import STR, UNICODE if strtype is str: + if s is None: + return lltype.nullptr(STR) ll_s = mallocstr(len(s)) else: + if s is None: + return lltype.nullptr(UNICODE) ll_s = mallocunicode(len(s)) for i, c in enumerate(s): ll_s.chars[i] = c Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py Thu Sep 30 00:16:20 2010 @@ -29,6 +29,60 @@ from pypy.translator.platform import platform from array import array +# ____________________________________________________________ + +far_regions = None + +def allocate_ctypes(ctype): + if far_regions: + import random + pieces = far_regions._ll2ctypes_pieces + num = random.randrange(len(pieces)) + i1, stop = pieces[num] + i2 = i1 + ((ctypes.sizeof(ctype) or 1) + 7) & ~7 + if i2 > stop: + raise MemoryError("out of memory in far_regions") + pieces[num] = i2, stop + p = lltype2ctypes(far_regions.getptr(i1)) + return ctypes.cast(p, ctypes.POINTER(ctype)).contents + else: + return ctype() + +def do_allocation_in_far_regions(): + """On 32 bits: this reserves 1.25GB of address space, or 2.5GB on Linux, + which helps test this module for address values that are signed or + unsigned. 
+ + On 64-bits: reserves 10 times 2GB of address space. This should help + to find 32-vs-64-bit issues in the JIT. It is likely that objects + are further apart than 32 bits can represent; it is also possible + to hit the corner case of being precisely e.g. 2GB - 8 bytes apart. + + Avoid this function if your OS reserves actual RAM from mmap() eagerly. + """ + global far_regions + if not far_regions: + from pypy.rlib import rmmap + if sys.maxint > 0x7FFFFFFF: + PIECESIZE = 0x80000000 + else: + if sys.platform == 'linux': + PIECESIZE = 0x10000000 + else: + PIECESIZE = 0x08000000 + PIECES = 10 + m = rmmap.mmap(-1, PIECES * PIECESIZE, + rmmap.MAP_PRIVATE|rmmap.MAP_ANONYMOUS, + rmmap.PROT_READ|rmmap.PROT_WRITE) + m.close = lambda : None # leak instead of giving a spurious + # error at CPython's shutdown + m._ll2ctypes_pieces = [] + for i in range(PIECES): + m._ll2ctypes_pieces.append((i * PIECESIZE, (i+1) * PIECESIZE)) + far_regions = m + +# ____________________________________________________________ + _ctypes_cache = {} _eci_cache = {} @@ -91,13 +145,13 @@ if S._arrayfld is None: if n is not None: raise TypeError("%r is not variable-sized" % (S,)) - storage = cls() + storage = allocate_ctypes(cls) return storage else: if n is None: raise TypeError("%r is variable-sized" % (S,)) biggercls = build_ctypes_struct(S, None, n) - bigstruct = biggercls() + bigstruct = allocate_ctypes(biggercls) array = getattr(bigstruct, S._arrayfld) if hasattr(array, 'length'): array.length = n @@ -139,7 +193,7 @@ if not isinstance(n, int): raise TypeError, "array length must be an int" biggercls = get_ctypes_array_of_size(A, n) - bigarray = biggercls() + bigarray = allocate_ctypes(biggercls) if hasattr(bigarray, 'length'): bigarray.length = n return bigarray @@ -379,7 +433,7 @@ "Returns the storage address as an int" if self._storage is None or self._storage is True: raise ValueError("Not a ctypes allocated structure") - return ctypes.cast(self._storage, ctypes.c_void_p).value + return intmask(ctypes.cast(self._storage, ctypes.c_void_p).value) def _free(self): self._check() # no double-frees Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py Thu Sep 30 00:16:20 2010 @@ -472,22 +472,25 @@ clear_large_memory_chunk = llmemory.raw_memclear +llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, + sandboxsafe=True, _nowrapper=True) +llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void, + sandboxsafe=True, _nowrapper=True) + def llimpl_arena_malloc(nbytes, zero): - addr = llmemory.raw_malloc(nbytes) - if zero and bool(addr): - clear_large_memory_chunk(addr, nbytes) + addr = llimpl_malloc(nbytes) + if bool(addr): + llimpl_arena_reset(addr, nbytes, zero) return addr -register_external(arena_malloc, [int, bool], llmemory.Address, +llimpl_arena_malloc._always_inline_ = True +register_external(arena_malloc, [int, int], llmemory.Address, 'll_arena.arena_malloc', llimpl=llimpl_arena_malloc, llfakeimpl=arena_malloc, sandboxsafe=True) -def llimpl_arena_free(arena_addr): - # NB. minimark.py assumes that arena_free() is actually just a raw_free(). 
- llmemory.raw_free(arena_addr) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', - llimpl=llimpl_arena_free, + llimpl=llimpl_free, llfakeimpl=arena_free, sandboxsafe=True) @@ -497,6 +500,7 @@ clear_large_memory_chunk(arena_addr, size) else: llmemory.raw_memclear(arena_addr, size) +llimpl_arena_reset._always_inline_ = True register_external(arena_reset, [llmemory.Address, int, int], None, 'll_arena.arena_reset', llimpl=llimpl_arena_reset, Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py Thu Sep 30 00:16:20 2010 @@ -159,7 +159,6 @@ if 'item_repr' not in self.__dict__: self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer()) if isinstance(self.LIST, GcForwardReference): - ITEM = self.item_repr.lowleveltype ITEMARRAY = self.get_itemarray_lowleveltype() self.LIST.become(ITEMARRAY) Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py Thu Sep 30 00:16:20 2010 @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated from pypy.rlib.objectmodel import _hash_string, enforceargs from pypy.rlib.debug import ll_assert -from pypy.rlib.jit import purefunction +from pypy.rlib.jit import purefunction, we_are_jitted from pypy.rpython.robject import PyObjRepr, pyobj_repr from pypy.rpython.rmodel import inputconst, IntegerRepr from pypy.rpython.rstr import AbstractStringRepr,AbstractCharRepr,\ @@ -65,8 +65,8 @@ dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) copy_string_contents._always_inline_ = True - copy_string_contents.oopspec = ( - '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) + #copy_string_contents.oopspec = ( + # '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') @@ -326,6 +326,7 @@ s1.copy_contents(s1, newstr, 0, 0, len1) s1.copy_contents(s2, newstr, 0, len1, len2) return newstr + ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' @purefunction def ll_strip(s, ch, left, right): @@ -443,8 +444,8 @@ if chars1[j] != chars2[j]: return False j += 1 - return True + ll_streq.oopspec = 'stroruni.equal(s1, s2)' @purefunction def ll_startswith(s1, s2): @@ -696,35 +697,33 @@ return result @purefunction - def ll_stringslice_startonly(s1, start): - len1 = len(s1.chars) - newstr = s1.malloc(len1 - start) - lgt = len1 - start - assert lgt >= 0 + def _ll_stringslice(s1, start, stop): + lgt = stop - start assert start >= 0 + assert lgt >= 0 + newstr = s1.malloc(lgt) s1.copy_contents(s1, newstr, start, 0, lgt) return newstr + _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' + _ll_stringslice._annenforceargs_ = [None, int, int] + + def ll_stringslice_startonly(s1, start): + return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) - @purefunction def ll_stringslice_startstop(s1, start, stop): - if stop >= len(s1.chars): - if start == 0: - return s1 - stop = len(s1.chars) - newstr = s1.malloc(stop - 
start) - assert start >= 0 - lgt = stop - start - assert lgt >= 0 - s1.copy_contents(s1, newstr, start, 0, lgt) - return newstr + if we_are_jitted(): + if stop > len(s1.chars): + stop = len(s1.chars) + else: + if stop >= len(s1.chars): + if start == 0: + return s1 + stop = len(s1.chars) + return LLHelpers._ll_stringslice(s1, start, stop) - @purefunction def ll_stringslice_minusone(s1): newlen = len(s1.chars) - 1 - newstr = s1.malloc(newlen) - assert newlen >= 0 - s1.copy_contents(s1, newstr, 0, 0, newlen) - return newstr + return LLHelpers._ll_stringslice(s1, 0, newlen) def ll_split_chr(LIST, s, c): chars = s.chars Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original) +++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Thu Sep 30 00:16:20 2010 @@ -16,6 +16,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.rtyper import RPythonTyper + +if False: # for now, please keep it False by default + from pypy.rpython.lltypesystem import ll2ctypes + ll2ctypes.do_allocation_in_far_regions() + + class TestLL2Ctypes(object): def setup_method(self, meth): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py Thu Sep 30 00:16:20 2010 @@ -39,6 +39,9 @@ def can_malloc_nonmovable(self): return not self.moving_gc + def can_optimize_clean_setarrayitems(self): + return True # False in case of card marking + # The following flag enables costly consistency checks after each # collection. It is automatically set to True by test_gc.py. The # checking logic is translatable, so the flag can be set to True @@ -76,7 +79,7 @@ def set_root_walker(self, root_walker): self.root_walker = root_walker - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): pass def statistics(self, index): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py Thu Sep 30 00:16:20 2010 @@ -147,6 +147,11 @@ def get_young_var_basesize(nursery_size): return nursery_size // 4 - 1 + @classmethod + def JIT_max_size_of_young_obj(cls): + min_nurs_size = cls.TRANSLATION_PARAMS['min_nursery_size'] + return cls.get_young_fixedsize(min_nurs_size) + def is_in_nursery(self, addr): ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, "odd-valued (i.e. 
tagged) pointer unexpected here") @@ -321,7 +326,7 @@ addr = pointer.address[0] newaddr = self.copy(addr) pointer.address[0] = newaddr - self.write_into_last_generation_obj(obj, newaddr) + self.write_into_last_generation_obj(obj) # ____________________________________________________________ # Implementation of nursery-only collections @@ -452,11 +457,12 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) def _setup_wb(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -464,33 +470,24 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). - def remember_young_pointer(addr_struct, addr): + def remember_young_pointer(addr_struct): #llop.debug_print(lltype.Void, "\tremember_young_pointer", # addr_struct, "<-", addr) - ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - self.write_into_last_generation_obj(addr_struct, addr) + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + self.old_objects_pointing_to_young.append(addr_struct) + self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS + self.write_into_last_generation_obj(addr_struct) remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer - def write_into_last_generation_obj(self, addr_struct, addr): + def write_into_last_generation_obj(self, addr_struct): objhdr = self.header(addr_struct) if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - if not self.is_last_generation(addr): - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.last_generation_root_objects.append(addr_struct) + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.last_generation_root_objects.append(addr_struct) + write_into_last_generation_obj._always_inline_ = True def assume_young_pointers(self, addr_struct): objhdr = self.header(addr_struct) Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py Thu Sep 30 00:16:20 2010 @@ -1,6 +1,7 @@ import sys from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage from pypy.rpython.memory.gc.base import GCBase, MovingGCBase from pypy.rpython.memory.gc import 
minimarkpage, base, generation from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE @@ -92,26 +93,28 @@ # PYPY_GC_NURSERY and fall back to half the size of # the L2 cache. For 'major_collection_threshold' it will look # it up in the env var PYPY_GC_MAJOR_COLLECT. It also sets - # 'max_heap_size' to PYPY_GC_MAX. + # 'max_heap_size' to PYPY_GC_MAX. Finally, PYPY_GC_MIN sets + # the minimal value of 'next_major_collection_threshold'. "read_from_env": True, # The size of the nursery. Note that this is only used as a # fall-back number. "nursery_size": 896*1024, - # The system page size. Like obmalloc.c, we assume that it is 4K, - # which is OK for most systems. - "page_size": 4096, + # The system page size. Like obmalloc.c, we assume that it is 4K + # for 32-bit systems; unlike obmalloc.c, we assume that it is 8K + # for 64-bit systems, for consistent results. + "page_size": 1024*WORD, # The size of an arena. Arenas are groups of pages allocated # together. "arena_size": 65536*WORD, # The maximum size of an object allocated compactly. All objects - # that are larger are just allocated with raw_malloc(). The value - # chosen here is enough for a unicode string of length 56 (on 64-bits) - # or 60 (on 32-bits). See rlib.rstring.INIT_SIZE. - "small_request_threshold": 256-WORD, + # that are larger are just allocated with raw_malloc(). Note that + # the size limit for being first allocated in the nursery is much + # larger; see below. + "small_request_threshold": 35*WORD, # Full collection threshold: after a major collection, we record # the total size consumed; and after every minor collection, if the @@ -125,7 +128,16 @@ # in regular arrays of pointers; more in arrays whose items are # larger. A value of 0 disables card marking. "card_page_indices": 128, - "card_page_indices_min": 800, # minimum number of indices for cards + + # Objects whose total size is at least 'large_object' bytes are + # allocated out of the nursery immediately. If the object + # has GC pointers in its varsized part, we use instead the + # higher limit 'large_object_gcptrs'. The idea is that + # separately allocated objects are allocated immediately "old" + # and it's not good to have too many pointers from old to young + # objects. + "large_object": 1600*WORD, + "large_object_gcptrs": 8250*WORD, } def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, @@ -136,7 +148,8 @@ small_request_threshold=5*WORD, major_collection_threshold=2.5, card_page_indices=0, - card_page_indices_min=None, + large_object=8*WORD, + large_object_gcptrs=10*WORD, ArenaCollectionClass=None): MovingGCBase.__init__(self, config, chunk_size) assert small_request_threshold % WORD == 0 @@ -145,16 +158,23 @@ self.small_request_threshold = small_request_threshold self.major_collection_threshold = major_collection_threshold self.num_major_collects = 0 + self.min_heap_size = 0.0 self.max_heap_size = 0.0 self.max_heap_size_already_raised = False # self.card_page_indices = card_page_indices if self.card_page_indices > 0: - self.card_page_indices_min = card_page_indices_min self.card_page_shift = 0 while (1 << self.card_page_shift) < self.card_page_indices: self.card_page_shift += 1 # + # 'large_object' and 'large_object_gcptrs' limit how big objects + # can be in the nursery, so they give a lower bound on the allowed + # size of the nursery. 
+ self.nonlarge_max = large_object - 1 + self.nonlarge_gcptrs_max = large_object_gcptrs - 1 + assert self.nonlarge_max <= self.nonlarge_gcptrs_max + # self.nursery = NULL self.nursery_free = NULL self.nursery_top = NULL @@ -218,7 +238,7 @@ else: # defaultsize = self.nursery_size - minsize = 18 * self.small_request_threshold + minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # @@ -229,66 +249,92 @@ newsize = generation.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize + newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll >= 1.0: self.major_collection_threshold = major_coll # + min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') + if min_heap_size > 0: + self.min_heap_size = float(min_heap_size) + else: + # defaults to 8 times the nursery + self.min_heap_size = newsize * 8 + # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) - self.nursery_size = max(newsize, minsize) + self.nursery_size = newsize self.allocate_nursery() def allocate_nursery(self): debug_start("gc-set-nursery-size") debug_print("nursery size:", self.nursery_size) - # the start of the nursery: we actually allocate a tiny bit more for + # the start of the nursery: we actually allocate a bit more for # the nursery than really needed, to simplify pointer arithmetic - # in malloc_fixedsize_clear(). - extra = self.small_request_threshold - self.nursery = llarena.arena_malloc(self.nursery_size + extra, True) + # in malloc_fixedsize_clear(). The few extra pages are never used + # anyway so it doesn't even count. + extra = self.nonlarge_gcptrs_max + 1 + self.nursery = llarena.arena_malloc(self.nursery_size + extra, 2) if not self.nursery: raise MemoryError("cannot allocate nursery") # the current position in the nursery: self.nursery_free = self.nursery # the end of the nursery: self.nursery_top = self.nursery + self.nursery_size - # initialize the threshold, a bit arbitrarily - self.next_major_collection_threshold = ( - self.nursery_size * self.major_collection_threshold) + # initialize the threshold + self.min_heap_size = max(self.min_heap_size, self.nursery_size * + self.major_collection_threshold) + self.set_major_threshold_from(0.0) debug_stop("gc-set-nursery-size") + def set_major_threshold_from(self, threshold): + # Set the next_major_collection_threshold. + if threshold < self.min_heap_size: + threshold = self.min_heap_size + # + if self.max_heap_size > 0.0 and threshold > self.max_heap_size: + threshold = self.max_heap_size + bounded = True + else: + bounded = False + # + self.next_major_collection_threshold = threshold + return bounded + def malloc_fixedsize_clear(self, typeid, size, can_collect=True, needs_finalizer=False, contains_weakptr=False): ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size - rawtotalsize = llmemory.raw_malloc_usage(totalsize) + rawtotalsize = raw_malloc_usage(totalsize) # # If the object needs a finalizer, ask for a rawmalloc. # The following check should be constant-folded. 
if needs_finalizer: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - result = self.malloc_with_finalizer(typeid, totalsize) + obj = self.external_malloc(typeid, 0) + self.objects_with_finalizers.append(obj) # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. The following check should be constant-folded. - elif rawtotalsize > self.small_request_threshold: + # If totalsize is greater than nonlarge_max (which should never be + # the case in practice), ask for a rawmalloc. The following check + # should be constant-folded. + elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - result = self._external_malloc(typeid, totalsize) + obj = self.external_malloc(typeid, 0) # else: # If totalsize is smaller than minimal_size_in_nursery, round it # up. The following check should also be constant-folded. - min_size = llmemory.raw_malloc_usage(self.minimal_size_in_nursery) + min_size = raw_malloc_usage(self.minimal_size_in_nursery) if rawtotalsize < min_size: totalsize = rawtotalsize = min_size # @@ -306,8 +352,10 @@ # If it is a weakref, record it (check constant-folded). if contains_weakptr: self.young_objects_with_weakrefs.append(result+size_gc_header) + # + obj = result + size_gc_header # - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_clear(self, typeid, length, size, itemsize, @@ -315,32 +363,41 @@ ll_assert(can_collect, "!can_collect") size_gc_header = self.gcheaderbuilder.size_gc_header nonvarsize = size_gc_header + size - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError # - # If totalsize is greater than small_request_threshold, ask for - # a rawmalloc. - if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold: - result = self._external_malloc_cardmark(typeid, totalsize, length) + # Compute the maximal length that makes the object still + # below 'nonlarge_max'. All the following logic is usually + # constant-folded because self.nonlarge_max, size and itemsize + # are all constants (the arguments are constant due to + # inlining) and self.has_gcptr_in_varsize() is constant-folded. + if self.has_gcptr_in_varsize(typeid): + nonlarge_max = self.nonlarge_gcptrs_max + else: + nonlarge_max = self.nonlarge_max + + if not raw_malloc_usage(itemsize): + too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max + else: + maxlength = nonlarge_max - raw_malloc_usage(nonvarsize) + maxlength = maxlength // raw_malloc_usage(itemsize) + too_many_items = length > maxlength + + if too_many_items: + # + # If the total size of the object would be larger than + # 'nonlarge_max', then allocate it externally. + obj = self.external_malloc(typeid, length) # else: - # Round the size up to the next multiple of WORD. Note that - # this is done only if totalsize <= self.small_request_threshold, - # i.e. it cannot overflow, and it keeps the property that - # totalsize <= self.small_request_threshold. + # With the above checks we know now that totalsize cannot be more + # than 'nonlarge_max'; in particular, the + and * cannot overflow. 
+ totalsize = nonvarsize + itemsize * length totalsize = llarena.round_up_for_allocation(totalsize) - ll_assert(llmemory.raw_malloc_usage(totalsize) <= - self.small_request_threshold, - "round_up_for_allocation() rounded up too much?") # # 'totalsize' should contain at least the GC header and # the length word, so it should never be smaller than # 'minimal_size_in_nursery' - ll_assert(llmemory.raw_malloc_usage(totalsize) >= - llmemory.raw_malloc_usage(self.minimal_size_in_nursery), + ll_assert(raw_malloc_usage(totalsize) >= + raw_malloc_usage(self.minimal_size_in_nursery), "malloc_varsize_clear(): totalsize < minimalsize") # # Get the memory from the nursery. If there is not enough space @@ -353,10 +410,12 @@ # Build the object. llarena.arena_reserve(result, totalsize) self.init_gc_object(result, typeid, flags=0) + # + # Set the length and return the object. + obj = result + size_gc_header + (obj + offset_to_length).signed[0] = length # - # Set the length and return the object. - (result + size_gc_header + offset_to_length).signed[0] = length - return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def collect(self, gen=1): @@ -389,105 +448,108 @@ collect_and_reserve._dont_inline_ = True - def _full_collect_if_needed(self, reserving_size): - reserving_size = llmemory.raw_malloc_usage(reserving_size) - if (float(self.get_total_memory_used()) + reserving_size > - self.next_major_collection_threshold): - self.minor_collection() - self.major_collection(reserving_size) - - def _external_malloc(self, typeid, totalsize): - """Allocate a large object using raw_malloc().""" - return self._external_malloc_cardmark(typeid, totalsize, 0) - - - def _external_malloc_cardmark(self, typeid, totalsize, length): - """Allocate a large object using raw_malloc(), possibly as an - object with card marking enabled, if its length is large enough. - 'length' can be specified as 0 if the object is not varsized.""" + def external_malloc(self, typeid, length): + """Allocate a large object using the ArenaCollection or + raw_malloc(), possibly as an object with card marking enabled, + if it has gc pointers in its var-sized part. 'length' should be + specified as 0 if the object is not varsized. The returned + object is fully initialized and zero-filled.""" + # + # Compute the total size, carefully checking for overflows. + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + self.fixed_size(typeid) + if length == 0: + # this includes the case of fixed-size objects, for which we + # should not even ask for the varsize_item_sizes(). + totalsize = nonvarsize + else: + itemsize = self.varsize_item_sizes(typeid) + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError # # If somebody calls this function a lot, we must eventually # force a full collection. - self._full_collect_if_needed(totalsize) + if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) > + self.next_major_collection_threshold): + self.minor_collection() + self.major_collection(raw_malloc_usage(totalsize)) # - # Check if we need to introduce the card marker bits area. - if (self.card_page_indices <= 0 # <- this check is constant-folded - or length < self.card_page_indices_min # <- must be large enough - or not self.has_gcptr_in_varsize(typeid)): # <- must contain ptrs + # Check if the object would fit in the ArenaCollection. 
+ if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # + # Yes. Round up 'totalsize' (it cannot overflow and it + # must remain <= self.small_request_threshold.) + totalsize = llarena.round_up_for_allocation(totalsize) + ll_assert(raw_malloc_usage(totalsize) <= + self.small_request_threshold, + "rounding up made totalsize > small_request_threshold") # - # In these cases, we don't want a card marker bits area. - cardheadersize = 0 + # Allocate from the ArenaCollection and clear the memory returned. + result = self.ac.malloc(totalsize) + llmemory.raw_memclear(result, totalsize) extra_flags = 0 # else: - # Reserve N extra words containing card bits before the object. - extra_words = self.card_marking_words_for_length(length) - cardheadersize = WORD * extra_words - extra_flags = GCFLAG_HAS_CARDS - # - allocsize = cardheadersize + llmemory.raw_malloc_usage(totalsize) - # - # Allocate the object using arena_malloc(), which we assume here - # is just the same as raw_malloc(), but allows the extra flexibility - # of saying that we have extra words in the header. - arena = llarena.arena_malloc(allocsize, False) - if not arena: - raise MemoryError("cannot allocate large object") - # - # Clear it using method 2 of llarena.arena_reset(), which is the - # same as just a raw_memclear(). - llarena.arena_reset(arena, allocsize, 2) - # - # Reserve the card mark as a list of single bytes - # (the loop is empty in C). - i = 0 - while i < cardheadersize: - llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) - i += 1 - # - # Initialize the object. - result = arena + cardheadersize - llarena.arena_reserve(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) - # - # Record the newly allocated object and its size. - size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_objects.append(result + size_gc_header) - return result - _external_malloc_cardmark._dont_inline_ = True - - - def _malloc_nonmovable(self, typeid, totalsize): - """Allocate an object non-movable.""" - # - rawtotalsize = llmemory.raw_malloc_usage(totalsize) - if rawtotalsize > self.small_request_threshold: + # No, so proceed to allocate it externally with raw_malloc(). + # Check if we need to introduce the card marker bits area. + if (self.card_page_indices <= 0 # <- this check is constant-folded + or not self.has_gcptr_in_varsize(typeid) or + raw_malloc_usage(totalsize) <= self.nonlarge_gcptrs_max): + # + # In these cases, we don't want a card marker bits area. + # This case also includes all fixed-size objects. + cardheadersize = 0 + extra_flags = 0 + # + else: + # Reserve N extra words containing card bits before the object. + extra_words = self.card_marking_words_for_length(length) + cardheadersize = WORD * extra_words + extra_flags = GCFLAG_HAS_CARDS + # + # Detect very rare cases of overflows + if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1) + - cardheadersize): + raise MemoryError("rare case of overflow") + # + # Now we know that the following computations cannot overflow. + # Note that round_up_for_allocation() is also needed to get the + # correct number added to 'rawmalloced_total_size'. + allocsize = (cardheadersize + raw_malloc_usage( + llarena.round_up_for_allocation(totalsize))) + # + # Allocate the object using arena_malloc(), which we assume here + # is just the same as raw_malloc(), but allows the extra + # flexibility of saying that we have extra words in the header. 
+ # The memory returned is cleared by a raw_memclear(). + arena = llarena.arena_malloc(allocsize, 2) + if not arena: + raise MemoryError("cannot allocate large object") + # + # Reserve the card mark bits as a list of single bytes + # (the loop is empty in C). + i = 0 + while i < cardheadersize: + llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) + i += 1 # - # The size asked for is too large for the ArenaCollection. - return self._external_malloc(typeid, totalsize) - # - totalsize = llarena.round_up_for_allocation(totalsize) - # - # If somebody calls _malloc_nonmovable() a lot, we must eventually - # force a full collection. - self._full_collect_if_needed(totalsize) - # - # Ask the ArenaCollection to do the malloc. - result = self.ac.malloc(totalsize) - llmemory.raw_memclear(result, totalsize) - self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS) - return result - - - def malloc_with_finalizer(self, typeid, totalsize): - """Allocate an object with a finalizer.""" + # Reserve the actual object. (This is also a no-op in C). + result = arena + cardheadersize + llarena.arena_reserve(result, totalsize) + # + # Record the newly allocated object and its full malloced size. + self.rawmalloced_total_size += allocsize + self.rawmalloced_objects.append(result + size_gc_header) # - result = self._malloc_nonmovable(typeid, totalsize) - size_gc_header = self.gcheaderbuilder.size_gc_header - self.objects_with_finalizers.append(result + size_gc_header) - return result - malloc_with_finalizer._dont_inline_ = True + # Common code to fill the header and length of the object. + self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags) + if self.is_varsize(typeid): + offset_to_length = self.varsize_offset_to_length(typeid) + (result + size_gc_header + offset_to_length).signed[0] = length + return result + size_gc_header # ---------- @@ -502,6 +564,11 @@ def can_malloc_nonmovable(self): return True + def can_optimize_clean_setarrayitems(self): + if self.card_page_indices > 0: + return False + return MovingGCBase.can_optimize_clean_setarrayitems(self) + def can_move(self, obj): """Overrides the parent can_move().""" return self.is_in_nursery(obj) @@ -529,37 +596,16 @@ def malloc_fixedsize_nonmovable(self, typeid): - """NOT_RPYTHON: not tested translated""" - size_gc_header = self.gcheaderbuilder.size_gc_header - totalsize = size_gc_header + self.fixed_size(typeid) - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header + obj = self.external_malloc(typeid, 0) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_varsize_nonmovable(self, typeid, length): - size_gc_header = self.gcheaderbuilder.size_gc_header - nonvarsize = size_gc_header + self.fixed_size(typeid) - itemsize = self.varsize_item_sizes(typeid) - offset_to_length = self.varsize_offset_to_length(typeid) - try: - varsize = ovfcheck(itemsize * length) - totalsize = ovfcheck(nonvarsize + varsize) - except OverflowError: - raise MemoryError - # - result = self._malloc_nonmovable(typeid, totalsize) - obj = result + size_gc_header - (obj + offset_to_length).signed[0] = length + obj = self.external_malloc(typeid, length) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def malloc_nonmovable(self, typeid, length, zero): # helper for testing, same as GCBase.malloc - if self.is_varsize(typeid): - gcref = self.malloc_varsize_nonmovable(typeid, length) - else: - gcref = self.malloc_fixedsize_nonmovable(typeid) - return llmemory.cast_ptr_to_adr(gcref) + return 
self.external_malloc(typeid, length or 0) # None -> 0 # ---------- @@ -647,8 +693,9 @@ "unexpected GCFLAG_CARDS_SET") # if the GCFLAG_HAS_CARDS is set, check that all bits are zero now if self.header(obj).tid & GCFLAG_HAS_CARDS: - ll_assert(self.card_page_indices > 0, - "GCFLAG_HAS_CARDS but not using card marking") + if self.card_page_indices <= 0: + ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking") + return typeid = self.get_type_id(obj) ll_assert(self.has_gcptr_in_varsize(typeid), "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") @@ -675,19 +722,23 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + @classmethod + def JIT_max_size_of_young_obj(cls): + return cls.TRANSLATION_PARAMS['large_object'] + + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) - def write_barrier_from_array(self, newvalue, addr_array, index): + def write_barrier_from_array(self, addr_array, index): if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS: if self.card_page_indices > 0: # <- constant-folded - self.remember_young_pointer_from_array(addr_array, index, - newvalue) + self.remember_young_pointer_from_array(addr_array, index) else: - self.remember_young_pointer(addr_array, newvalue) + self.remember_young_pointer(addr_array) def _init_writebarrier_logic(self): + DEBUG = self.DEBUG # The purpose of attaching remember_young_pointer to the instance # instead of keeping it as a regular method is to help the JIT call it. # Additionally, it makes the code in write_barrier() marginally smaller @@ -695,30 +746,22 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). - def remember_young_pointer(addr_struct, addr): - # 'addr_struct' is the address of the object in which we write; - # 'addr' is the address that we write in 'addr_struct'. - ll_assert(not self.is_in_nursery(addr_struct), - "nursery object with GCFLAG_NO_YOUNG_PTRS") - # if we have tagged pointers around, we first need to check whether - # we have valid pointer here, otherwise we can do it after the - # is_in_nursery check - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # Core logic: if the 'addr' is in the nursery, then we need + def remember_young_pointer(addr_struct): + # 'addr_struct' is the address of the object in which we write. + if DEBUG: + ll_assert(not self.is_in_nursery(addr_struct), + "nursery object with GCFLAG_NO_YOUNG_PTRS") + # + # We assume that what we are writing is a pointer to the nursery + # (and don't care for the fact that this new pointer may not + # actually point to the nursery, which seems ok). What we need is # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object # to the list 'old_objects_pointing_to_young'. We know that # 'addr_struct' cannot be in the nursery, because nursery objects # never have the flag GCFLAG_NO_YOUNG_PTRS to start with. 
+ self.old_objects_pointing_to_young.append(addr_struct) objhdr = self.header(addr_struct) - if self.is_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS - elif (not self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return + objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS # # Second part: if 'addr_struct' is actually a prebuilt GC # object and it's the first time we see a write to it, we @@ -737,17 +780,16 @@ def _init_writebarrier_with_card_marker(self): - def remember_young_pointer_from_array(addr_array, index, addr): + def remember_young_pointer_from_array(addr_array, index): # 'addr_array' is the address of the object in which we write, # which must have an array part; 'index' is the index of the - # item that is (or contains) the pointer that we write; - # 'addr' is the address that we write in the array. + # item that is (or contains) the pointer that we write. objhdr = self.header(addr_array) if objhdr.tid & GCFLAG_HAS_CARDS == 0: # # no cards, use default logic. The 'nocard_logic()' is just # 'remember_young_pointer()', but forced to be inlined here. - nocard_logic(addr_array, addr) + nocard_logic(addr_array) return # # 'addr_array' is a raw_malloc'ed array with card markers @@ -764,22 +806,13 @@ if byte & bitmask: return # - # As in remember_young_pointer, check if 'addr' is a valid - # pointer, in case it can be a tagged integer - if (self.config.taggedpointers and - not self.is_valid_gc_object(addr)): - return - # - # If the 'addr' is in the nursery, then we need to set the flag. - # Note that the following check is done after the bit check - # above, because it is expected that the "bit already set" - # situation is the most common. - if self.is_in_nursery(addr): - addr_byte.char[0] = chr(byte | bitmask) - # - if objhdr.tid & GCFLAG_CARDS_SET == 0: - self.old_objects_with_cards_set.append(addr_array) - objhdr.tid |= GCFLAG_CARDS_SET + # We set the flag (even if the newly written address does not + # actually point to the nursery -- like remember_young_pointer()). + addr_byte.char[0] = chr(byte | bitmask) + # + if objhdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET nocard_logic = func_with_new_name(self.remember_young_pointer, 'remember_young_pointer_nocard') @@ -934,6 +967,8 @@ if cardbyte & 1: if interval_stop > length: interval_stop = length + ll_assert(cardbyte <= 1 and bytes == 0, + "premature end of object") self.trace_and_drag_out_of_nursery_partial( obj, interval_start, interval_stop) # @@ -997,7 +1032,7 @@ if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: # # Common case: allocate a new nonmovable location for it. - newhdr = self.ac.malloc(totalsize) + newhdr = self._malloc_out_of_nursery(totalsize) # else: # The object has already a shadow. @@ -1035,6 +1070,33 @@ self.old_objects_pointing_to_young.append(newobj) + def _malloc_out_of_nursery(self, totalsize): + """Allocate non-movable memory for an object of the given + 'totalsize' that lives so far in the nursery.""" + if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # most common path + return self.ac.malloc(totalsize) + else: + # for nursery objects that are not small + return self._malloc_out_of_nursery_nonsmall(totalsize) + _malloc_out_of_nursery._always_inline_ = True + + def _malloc_out_of_nursery_nonsmall(self, totalsize): + # 'totalsize' should be aligned. 
+ ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0, + "misaligned totalsize in _malloc_out_of_nursery_nonsmall") + # + arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False) + if not arena: + raise MemoryError("cannot allocate object") + llarena.arena_reserve(arena, totalsize) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += raw_malloc_usage(totalsize) + self.rawmalloced_objects.append(arena + size_gc_header) + return arena + + # ---------- # Full collection @@ -1104,30 +1166,26 @@ # Set the threshold for the next major collection to be when we # have allocated 'major_collection_threshold' times more than # we currently have. - self.next_major_collection_threshold = ( + bounded = self.set_major_threshold_from( (self.get_total_memory_used() * self.major_collection_threshold) + reserving_size) # # Max heap size: gives an upper bound on the threshold. If we # already have at least this much allocated, raise MemoryError. - if (self.max_heap_size > 0.0 and - self.next_major_collection_threshold > self.max_heap_size): + if bounded and (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_threshold): # - self.next_major_collection_threshold = self.max_heap_size - if (float(self.get_total_memory_used()) + reserving_size >= - self.next_major_collection_threshold): - # - # First raise MemoryError, giving the program a chance to - # quit cleanly. It might still allocate in the nursery, - # which might eventually be emptied, triggering another - # major collect and (possibly) reaching here again with an - # even higher memory consumption. To prevent it, if it's - # the second time we are here, then abort the program. - if self.max_heap_size_already_raised: - llop.debug_fatalerror(lltype.Void, - "Using too much memory, aborting") - self.max_heap_size_already_raised = True - raise MemoryError + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError # # At the end, we can execute the finalizers of the objects # listed in 'run_finalizers'. 
Note that this will typically do @@ -1159,8 +1217,7 @@ self.rawmalloced_objects.append(obj) else: totalsize = size_gc_header + self.get_size(obj) - rawtotalsize = llmemory.raw_malloc_usage(totalsize) - self.rawmalloced_total_size -= rawtotalsize + allocsize = raw_malloc_usage(totalsize) arena = llarena.getfakearenaaddress(obj - size_gc_header) # # Must also include the card marker area, if any @@ -1175,8 +1232,10 @@ length = (obj + offset_to_length).signed[0] extra_words = self.card_marking_words_for_length(length) arena -= extra_words * WORD + allocsize += extra_words * WORD # llarena.arena_free(arena) + self.rawmalloced_total_size -= allocsize # list.delete() @@ -1260,7 +1319,8 @@ else: size_gc_header = self.gcheaderbuilder.size_gc_header size = self.get_size(obj) - shadowhdr = self.ac.malloc(size_gc_header + size) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) # initialize to an invalid tid *without* GCFLAG_VISITED, # so that if the object dies before the next minor # collection, the shadow will stay around but be collected @@ -1454,7 +1514,7 @@ self.total_memory_used = 0 def malloc(self, size): - nsize = llmemory.raw_malloc_usage(size) + nsize = raw_malloc_usage(size) ll_assert(nsize > 0, "malloc: size is null or negative") ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py Thu Sep 30 00:16:20 2010 @@ -4,6 +4,7 @@ from pypy.rlib.debug import ll_assert WORD = LONG_BIT // 8 +WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] NULL = llmemory.NULL @@ -39,6 +40,9 @@ # -- The chained list of free blocks. If there are none, points to the # first uninitialized block. ('freeblock', llmemory.Address), + # -- The structure above is 4 words, which is a good value: + # '(1024-4) % N' is zero or very small for various small N's, + # i.e. there is not much wasted space. 
) PAGE_PTR.TO.become(PAGE_HEADER) PAGE_NULL = lltype.nullptr(PAGE_HEADER) @@ -87,7 +91,7 @@ self.total_memory_used += nsize # # Get the page to use from the size - size_class = nsize / WORD + size_class = nsize >> WORD_POWER_2 page = self.page_for_size[size_class] if page == PAGE_NULL: page = self.allocate_new_page(size_class) @@ -190,7 +194,7 @@ self.total_memory_used = r_uint(0) # # For each size class: - size_class = self.small_request_threshold / WORD + size_class = self.small_request_threshold >> WORD_POWER_2 while size_class >= 1: # # Walk the pages in 'page_for_size[size_class]' and @@ -336,7 +340,7 @@ def _start_of_page_untranslated(addr, page_size): assert isinstance(addr, llarena.fakearenaaddress) - shift = 4 # for testing, we assume that the whole arena is not + shift = WORD # for testing, we assume that the whole arena is not # on a page boundary ofs = ((addr.offset - shift) // page_size) * page_size + shift return llarena.fakearenaaddress(addr.arena, ofs) Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py Thu Sep 30 00:16:20 2010 @@ -86,19 +86,17 @@ def write(self, p, fieldname, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) setattr(p, fieldname, newvalue) def writearray(self, p, index, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) if hasattr(self.gc, 'write_barrier_from_array'): - self.gc.write_barrier_from_array(newaddr, addr_struct, index) + self.gc.write_barrier_from_array(addr_struct, index) else: - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -507,8 +505,7 @@ for index, expected_x in nums.items(): assert a[index].x == expected_x self.stackroots.pop() - test_card_marker.GC_PARAMS = {"card_page_indices": 4, - "card_page_indices_min": 7} + test_card_marker.GC_PARAMS = {"card_page_indices": 4} class TestMiniMarkGCFull(DirectGCTest): from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py Thu Sep 30 00:16:20 2010 @@ -5,26 +5,6 @@ # Note that most tests are in test_direct.py. -def test_stringbuilder_default_initsize_is_small(): - # Check that pypy.rlib.rstring.INIT_SIZE is short enough to let - # the allocated object be considered as a "small" object. - # Otherwise it would not be allocated in the nursery at all, - # which is kind of bad (and also prevents shrink_array() from - # being useful). 
- from pypy.rlib.rstring import INIT_SIZE - from pypy.rpython.lltypesystem.rstr import STR, UNICODE - # - size_gc_header = llmemory.raw_malloc_usage( - llmemory.sizeof(llmemory.Address)) - # - size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE)) - size1 = size_gc_header + size1 - assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - # - size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE)) - size2 = size_gc_header + size2 - assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"] - def test_card_marking_words_for_length(): gc = MiniMarkGC(None, card_page_indices=128) assert gc.card_page_shift == 7 Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py Thu Sep 30 00:16:20 2010 @@ -7,22 +7,22 @@ from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr NULL = llmemory.NULL -SHIFT = 4 +SHIFT = WORD hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) def test_allocate_arena(): - ac = ArenaCollection(SHIFT + 8*20, 8, 1) + ac = ArenaCollection(SHIFT + 16*20, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1") + ac.uninitialized_pages + 16*20 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1") # - ac = ArenaCollection(SHIFT + 8*20 + 7, 8, 1) + ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 8*20 + 7 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 8") + ac.uninitialized_pages + 16*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16") def test_allocate_new_page(): Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py Thu Sep 30 00:16:20 2010 @@ -139,7 +139,7 @@ def __init__(self, translator): from pypy.rpython.memory.gc.base import choose_gc_from_config from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP - from pypy.rpython.memory.gc import inspect + from pypy.rpython.memory.gc import inspector super(FrameworkGCTransformer, self).__init__(translator, inline=True) if hasattr(self, 'GC_PARAMS'): @@ -391,27 +391,27 @@ else: self.id_ptr = None - self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots, + self.get_rpy_roots_ptr = getfn(inspector.get_rpy_roots, [s_gc], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents, + self.get_rpy_referents_ptr = getfn(inspector.get_rpy_referents, [s_gc, s_gcref], rgc.s_list_of_gcrefs(), minimal_transform=False) - self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage, + self.get_rpy_memory_usage_ptr = getfn(inspector.get_rpy_memory_usage, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index, + self.get_rpy_type_index_ptr = 
getfn(inspector.get_rpy_type_index, [s_gc, s_gcref], annmodel.SomeInteger(), minimal_transform=False) - self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance, + self.is_rpy_instance_ptr = getfn(inspector.is_rpy_instance, [s_gc, s_gcref], annmodel.SomeBool(), minimal_transform=False) - self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap, + self.dump_rpy_heap_ptr = getfn(inspector.dump_rpy_heap, [s_gc, annmodel.SomeInteger()], annmodel.s_Bool, minimal_transform=False) @@ -426,7 +426,6 @@ if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, - annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None, inline=True) @@ -435,15 +434,13 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress(), - annmodel.SomeAddress()], + [annmodel.SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, [s_gc, annmodel.SomeAddress(), - annmodel.SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -455,8 +452,7 @@ self.write_barrier_from_array_failing_case_ptr = \ getfn(func, [annmodel.SomeAddress(), - annmodel.SomeInteger(), - annmodel.SomeAddress()], + annmodel.SomeInteger()], annmodel.s_None) self.statistics_ptr = getfn(GCClass.statistics.im_func, [s_gc, annmodel.SomeInteger()], @@ -610,8 +606,10 @@ if self.write_barrier_ptr: self.clean_sets = ( - find_clean_setarrayitems(self.collect_analyzer, graph).union( - find_initializing_stores(self.collect_analyzer, graph))) + find_initializing_stores(self.collect_analyzer, graph)) + if self.gcdata.gc.can_optimize_clean_setarrayitems(): + self.clean_sets = self.clean_sets.union( + find_clean_setarrayitems(self.collect_analyzer, graph)) super(FrameworkGCTransformer, self).transform_graph(graph) if self.write_barrier_ptr: self.clean_sets = None @@ -1023,8 +1021,6 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], - resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) if (self.write_barrier_from_array_ptr is not None and @@ -1034,14 +1030,12 @@ assert v_index.concretetype == lltype.Signed hop.genop("direct_call", [self.write_barrier_from_array_ptr, self.c_const_gc, - v_newvalue, v_structaddr, v_index]) else: self.write_barrier_calls += 1 hop.genop("direct_call", [self.write_barrier_ptr, self.c_const_gc, - v_newvalue, v_structaddr]) hop.rename('bare_' + opname) Modified: pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py Thu Sep 30 00:16:20 2010 @@ -94,7 +94,6 @@ assert (type(index) is int # <- fast path or lltype.typeOf(index) == lltype.Signed) self.gc.write_barrier_from_array( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer), index) wb = False @@ -102,7 +101,6 @@ # if wb: self.gc.write_barrier( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py 
============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py Thu Sep 30 00:16:20 2010 @@ -29,6 +29,7 @@ GC_CAN_MALLOC_NONMOVABLE = True GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False + BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -495,7 +496,8 @@ # with larger numbers, it gets allocated outside the semispace # with some GCs. flag = self.GC_CAN_SHRINK_BIG_ARRAY - assert self.interpret(f, [12, 0, flag]) == 0x62024241 + bigsize = self.BUT_HOW_BIG_IS_A_BIG_STRING + assert self.interpret(f, [bigsize, 0, flag]) == 0x62024241 def test_tagged_simple(self): from pypy.rlib.objectmodel import UnboxedValue @@ -770,7 +772,7 @@ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True + BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD class TestMiniMarkGCCardMarking(TestMiniMarkGC): - GC_PARAMS = {'card_page_indices': 4, - 'card_page_indices_min': 10} + GC_PARAMS = {'card_page_indices': 4} Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py ============================================================================== --- pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py (original) +++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py Thu Sep 30 00:16:20 2010 @@ -1474,11 +1474,37 @@ 'page_size': 16*WORD, 'arena_size': 64*WORD, 'small_request_threshold': 5*WORD, + 'large_object': 8*WORD, + 'large_object_gcptrs': 10*WORD, 'card_page_indices': 4, - 'card_page_indices_min': 10, } root_stack_depth = 200 + def define_no_clean_setarrayitems(cls): + # The optimization find_clean_setarrayitems() in + # gctransformer/framework.py does not work with card marking. + # Check that it is turned off. 
+ S = lltype.GcStruct('S', ('x', lltype.Signed)) + A = lltype.GcArray(lltype.Ptr(S)) + def sub(lst): + lst[15] = lltype.malloc(S) # 'lst' is set the single mark "12-15" + lst[15].x = 123 + lst[0] = lst[15] # that would be a "clean_setarrayitem" + def f(): + lst = lltype.malloc(A, 16) # 16 > 10 + rgc.collect() + sub(lst) + null = lltype.nullptr(S) + lst[15] = null # clear, so that A() is only visible via lst[0] + rgc.collect() # -> crash + return lst[0].x + return f + + def test_no_clean_setarrayitems(self): + run = self.runner("no_clean_setarrayitems") + res = run([]) + assert res == 123 + # ________________________________________________________________ # tagged pointers Modified: pypy/branch/fast-forward/pypy/tool/progressbar.py ============================================================================== --- pypy/branch/fast-forward/pypy/tool/progressbar.py (original) +++ pypy/branch/fast-forward/pypy/tool/progressbar.py Thu Sep 30 00:16:20 2010 @@ -17,11 +17,11 @@ ) PADDING = 7 - def __init__(self, color=None, width=None, block='?', empty=' '): + def __init__(self, color=None, width=None, block='.', empty=' '): """ color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK) width -- bar width (optinal) - block -- progress display character (default '?') + block -- progress display character (default '.') empty -- bar display character (default ' ') """ if color: Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py Thu Sep 30 00:16:20 2010 @@ -913,6 +913,7 @@ visit_leaq = FunctionGcRootTracker._visit_lea visit_xorq = FunctionGcRootTracker.binary_insn + visit_xchgq = FunctionGcRootTracker._visit_xchg # FIXME: similar to visit_popl for 32-bit def visit_popq(self, line): Modified: pypy/branch/fast-forward/pypy/translator/c/genc.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/genc.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/genc.py Thu Sep 30 00:16:20 2010 @@ -592,7 +592,7 @@ if sys.platform == 'win32': python = sys.executable.replace('\\', '/') + ' ' else: - python = '' + python = sys.executable + ' ' if self.translator.platform.name == 'msvc': lblofiles = [] Modified: pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py ============================================================================== --- pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py (original) +++ pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py Thu Sep 30 00:16:20 2010 @@ -783,6 +783,17 @@ res = fn() assert res == 42 + def test_llarena(self): + from pypy.rpython.lltypesystem import llmemory, llarena + # + def f(): + a = llarena.arena_malloc(800, False) + llarena.arena_reset(a, 800, 2) + llarena.arena_free(a) + # + fn = self.getcompiled(f, []) + fn() + def test_padding_in_prebuilt_struct(self): from pypy.rpython.lltypesystem import rffi from pypy.rpython.tool import rffi_platform From agaynor at codespeak.net Thu Sep 30 00:50:41 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 00:50:41 +0200 (CEST) Subject: [pypy-svn] r77478 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100929225041.2DF04282B9E@codespeak.net> Author: agaynor Date: Thu Sep 30 00:50:39 2010 New Revision: 77478 Modified: 
pypy/branch/fast-forward/pypy/objspace/std/floattype.py Log: Added real and imag properties to floats. Modified: pypy/branch/fast-forward/pypy/objspace/std/floattype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/floattype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/floattype.py Thu Sep 30 00:50:39 2010 @@ -2,7 +2,7 @@ import sys from pypy.rlib.unroll import unrolling_iterable from pypy.rlib import rfloat, rarithmetic -from pypy.interpreter import gateway +from pypy.interpreter import gateway, typedef from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError from pypy.objspace.std.stdtypedef import StdTypeDef, SMM @@ -213,6 +213,11 @@ w_float = space.wrap(sign * value) return space.call_function(w_cls, w_float) +def descr_get_real(space, w_obj): + return w_obj + +def descr_get_imag(space, w_obj): + return space.wrap(0.0) # ____________________________________________________________ @@ -227,5 +232,7 @@ fromhex = gateway.interp2app(descr_fromhex, unwrap_spec=[ObjSpace, W_Root, str], as_classmethod=True), - ) + real = typedef.GetSetProperty(descr_get_real), + imag = typedef.GetSetProperty(descr_get_imag), +) float_typedef.registermethods(globals()) From afa at codespeak.net Thu Sep 30 00:54:23 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 30 Sep 2010 00:54:23 +0200 (CEST) Subject: [pypy-svn] r77479 - pypy/branch/fast-forward/lib-python/modified-2.7.0/test Message-ID: <20100929225423.D4204282B9E@codespeak.net> Author: afa Date: Thu Sep 30 00:54:22 2010 New Revision: 77479 Added: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_threading.py - copied, changed from r77476, pypy/branch/fast-forward/lib-python/2.7.0/test/test_threading.py Log: skip a test, it blocks the test suite Copied: pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_threading.py (from r77476, pypy/branch/fast-forward/lib-python/2.7.0/test/test_threading.py) ============================================================================== --- pypy/branch/fast-forward/lib-python/2.7.0/test/test_threading.py (original) +++ pypy/branch/fast-forward/lib-python/modified-2.7.0/test/test_threading.py Thu Sep 30 00:54:22 2010 @@ -458,6 +458,7 @@ """ self._run_and_join(script) + @unittest.skip("FIXME: pypy should have an 'after_fork' hook") def test_3_join_in_forked_from_thread(self): # Like the test above, but fork() was called from a worker thread # In the forked process, the main Thread object must be marked as stopped. 
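For context, r77478 above gives PyPy's application-level floats the read-only 'real' and 'imag' attributes that CPython 2.6+ already provides. The few lines below are only an illustrative check of that behaviour, not part of either patch above; they use plain Python and assume nothing PyPy-specific:

    # quick sanity check of the properties added in r77478
    x = 1.5
    assert x.real == x                # descr_get_real returns the float itself
    assert x.imag == 0.0              # descr_get_imag always wraps 0.0
    assert isinstance(x.imag, float)  # the result is an ordinary float
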
From agaynor at codespeak.net Thu Sep 30 01:20:59 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 01:20:59 +0200 (CEST) Subject: [pypy-svn] r77480 - pypy/branch/fast-forward/pypy/module/itertools Message-ID: <20100929232059.B1B48282B9E@codespeak.net> Author: agaynor Date: Thu Sep 30 01:20:58 2010 New Revision: 77480 Modified: pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py Log: Added the step parameter to itertools.count Modified: pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py ============================================================================== --- pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py (original) +++ pypy/branch/fast-forward/pypy/module/itertools/interp_itertools.py Thu Sep 30 01:20:58 2010 @@ -7,9 +7,10 @@ class W_Count(Wrappable): - def __init__(self, space, firstval): + def __init__(self, space, firstval, step): self.space = space self.c = firstval + self.step = step def iter_w(self): return self.space.wrap(self) @@ -17,7 +18,7 @@ def next_w(self): c = self.c try: - self.c = ovfcheck(self.c + 1) + self.c = ovfcheck(self.c + self.step) except OverflowError: raise OperationError(self.space.w_OverflowError, self.space.wrap("cannot count beyond sys.maxint")) @@ -25,16 +26,20 @@ return self.space.wrap(c) def repr_w(self): - s = 'count(%d)' % (self.c,) + if self.step == 1: + s = 'count(%d)' % (self.c,) + else: + s = 'count(%d, %d)' % (self.c, self.step) return self.space.wrap(s) + -def W_Count___new__(space, w_subtype, firstval=0): - return space.wrap(W_Count(space, firstval)) +def W_Count___new__(space, w_subtype, firstval=0, step=1): + return space.wrap(W_Count(space, firstval, step)) W_Count.typedef = TypeDef( 'count', - __new__ = interp2app(W_Count___new__, unwrap_spec=[ObjSpace, W_Root, int]), + __new__ = interp2app(W_Count___new__, unwrap_spec=[ObjSpace, W_Root, int, int]), __iter__ = interp2app(W_Count.iter_w, unwrap_spec=['self']), next = interp2app(W_Count.next_w, unwrap_spec=['self']), __repr__ = interp2app(W_Count.repr_w, unwrap_spec=['self']), From antocuni at codespeak.net Thu Sep 30 10:06:54 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 30 Sep 2010 10:06:54 +0200 (CEST) Subject: [pypy-svn] r77482 - in pypy/branch/jitffi/pypy/jit/metainterp: optimizeopt test Message-ID: <20100930080654.D65C0282BE8@codespeak.net> Author: antocuni Date: Thu Sep 30 10:06:52 2010 New Revision: 77482 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Log: attach the dynamic calldescr to the new emitted CALL operation Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py Thu Sep 30 10:06:52 2010 @@ -5,17 +5,18 @@ from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -class FuncDescription(object): +class FuncInfo(object): def __init__(self, cpu, func): self.func = func - self.args = [] + self.opargs = [] + self.descr = cpu.calldescrof_dynamic(func.argtypes, func.restype) class OptFfiCall(Optimization): def __init__(self): - self.funcs = {} + self.func_infos = {} def get_oopspec(self, funcval): # XXX: not RPython at all, just a hack while waiting to have an @@ -60,25 +61,25 
@@ def do_prepare_call(self, op): func = self._get_func(op) - assert func not in self.funcs # XXX: do something nice etc. etc. - self.funcs[func] = FuncDescription(self.optimizer.cpu, func) + assert func not in self.func_infos # XXX: do something nice etc. etc. + self.func_infos[func] = FuncInfo(self.optimizer.cpu, func) def do_push_arg(self, op): # we store the op in funcs because we might want to emit it later, # in case we give up with the optimization func = self._get_func(op) - self.funcs[func].args.append(op) + self.func_infos[func].opargs.append(op) def do_call(self, op): func = self._get_func(op) funcsymval = self.getvalue(op.getarg(2)) arglist = [funcsymval.force_box()] - for push_op in self.funcs[func].args: + info = self.func_infos[func] + for push_op in info.opargs: argval = self.getvalue(push_op.getarg(2)) arglist.append(argval.force_box()) - # XXX: add the descr - newop = ResOperation(rop.CALL, arglist, op.result, None) - del self.funcs[func] + newop = ResOperation(rop.CALL, arglist, op.result, descr=info.descr) + del self.func_infos[func] return newop def propagate_forward(self, op): Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 30 10:06:52 2010 @@ -3900,7 +3900,24 @@ # ------------------------------------------------ from pypy.rpython.lltypesystem import llmemory -from pypy.rlib.libffi import Func +from pypy.rlib.libffi import Func, ffi_type_sint, ffi_type_double +from pypy.jit.metainterp.history import AbstractDescr + +class MyCallDescr(AbstractDescr): + """ + Fake calldescr to be used inside the tests. + + The particularity is that it provides an __eq__ method, so that it + comparses by value by comparing the arg_types and typeinfo fields, so you + can check that the signature of a call is really what you want. + """ + + def __init__(self, arg_types, typeinfo): + self.arg_types = arg_types + self.typeinfo = typeinfo # return type + + def __eq__(self, other): + return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo class FakeLLObject(object): @@ -3916,8 +3933,11 @@ class namespace: cpu = LLtypeMixin.cpu plaincalldescr = LLtypeMixin.plaincalldescr + int_float__int = MyCallDescr('if', 'i') funcptr = FakeLLObject() - func = FakeLLObject(_fake_class=Func) + func = FakeLLObject(_fake_class=Func, + argtypes=[ffi_type_sint, ffi_type_double], + restype=ffi_type_sint) namespace = namespace.__dict__ @@ -3933,7 +3953,7 @@ """ expected = """ [i0, f1] - i3 = call(1, i0, f1) + i3 = call(1, i0, f1, descr=int_float__int) jump(i3, f1) """ loop = self.optimize_loop(ops, 'Not, Not', expected) From antocuni at codespeak.net Thu Sep 30 10:58:09 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 30 Sep 2010 10:58:09 +0200 (CEST) Subject: [pypy-svn] r77483 - in pypy/branch/jitffi/pypy: jit/metainterp/optimizeopt rlib Message-ID: <20100930085809.0C7AB282BE8@codespeak.net> Author: antocuni Date: Thu Sep 30 10:58:08 2010 New Revision: 77483 Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py pypy/branch/jitffi/pypy/rlib/libffi.py Log: refactor until we manage to get a nice optimized graph from test_direct_call.py, see the docstring of _get_signature for details. 
The optimized graph still cannot be executed by the llgraph backend, because it doesn't know how to CALL a real C function Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/fficall.py Thu Sep 30 10:58:08 2010 @@ -7,10 +7,49 @@ class FuncInfo(object): - def __init__(self, cpu, func): - self.func = func + def __init__(self, funcval, cpu): self.opargs = [] - self.descr = cpu.calldescrof_dynamic(func.argtypes, func.restype) + argtypes, restype = self._get_signature(funcval) + self.descr = cpu.calldescrof_dynamic(argtypes, restype) + + def _get_signature(self, funcval): + """ + given the funcval, return a tuple (argtypes, restype), where the + actuall types are libffi.ffi_type_* + + The implementation is tricky because we have three possible cases: + + - translated: the easiest case, we can just cast back the pointer to + the original Func instance and read .argtypes and .restype + + - completely untranslated: this is what we get from test_optimizeopt + tests. funcval contains a FakeLLObject whose _fake_class is Func, + and we can just get .argtypes and .restype + + - partially translated: this happens when running metainterp tests: + funcval contains the low-level equivalent of a Func, and thus we + have to fish inst_argtypes and inst_restype by hand. Note that + inst_argtypes is actually a low-level array, but we can use it + directly since the only thing we do with it is to read its items + """ + + llfunc = funcval.box.getref_base() + if we_are_translated(): + XXX + elif getattr(llfunc, '_fake_class', None) is Func: + # untranslated + return llfunc.argtypes, llfunc.restype + else: + # partially translated + # llfunc contains an opaque pointer to something like the following: + # + # + # Unfortunately, we cannot use the proper lltype.cast_opaque_ptr, + # because we don't have the exact TYPE to cast to. Instead, we + # just fish it manually :-( + f = llfunc._obj.container + return f.inst_argtypes, f.inst_restype class OptFfiCall(Optimization): @@ -43,43 +82,32 @@ op = self.do_call(op) self.emit_operation(op) - def _cast_to_high_level(self, Class, obj): - if we_are_translated(): - XXX - else: - # this is just for the tests in test_optimizeopt.py - cls = getattr(obj, '_fake_class', obj.__class__) - assert issubclass(cls, Class) - return obj - - def _get_func(self, op): + def _get_funcval(self, op): funcval = self.getvalue(op.getarg(1)) assert funcval.is_constant() # XXX: do something nice if it's not constant - llfunc = funcval.box.getref_base() - func = self._cast_to_high_level(Func, llfunc) - return func + return funcval def do_prepare_call(self, op): - func = self._get_func(op) - assert func not in self.func_infos # XXX: do something nice etc. etc. - self.func_infos[func] = FuncInfo(self.optimizer.cpu, func) + funcval = self._get_funcval(op) + assert funcval not in self.func_infos # XXX: do something nice etc. etc. 
+ self.func_infos[funcval] = FuncInfo(funcval, self.optimizer.cpu) def do_push_arg(self, op): # we store the op in funcs because we might want to emit it later, # in case we give up with the optimization - func = self._get_func(op) - self.func_infos[func].opargs.append(op) + funcval = self._get_funcval(op) + self.func_infos[funcval].opargs.append(op) def do_call(self, op): - func = self._get_func(op) + funcval = self._get_funcval(op) + info = self.func_infos[funcval] funcsymval = self.getvalue(op.getarg(2)) arglist = [funcsymval.force_box()] - info = self.func_infos[func] for push_op in info.opargs: argval = self.getvalue(push_op.getarg(2)) arglist.append(argval.force_box()) newop = ResOperation(rop.CALL, arglist, op.result, descr=info.descr) - del self.func_infos[func] + del self.func_infos[funcval] return newop def propagate_forward(self, op): Modified: pypy/branch/jitffi/pypy/rlib/libffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/libffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/libffi.py Thu Sep 30 10:58:08 2010 @@ -52,7 +52,7 @@ def _do_call(self, funcsym, RESULT): return self.funcptr.call(RESULT) - _do_call._annspecialcase_ = 'specialize:arg(1)' + _do_call._annspecialcase_ = 'specialize:arg(2)' _do_call.oopspec = 'libffi_call(self, funcsym, RESULT)' @jit.unroll_safe From afa at codespeak.net Thu Sep 30 11:03:43 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 30 Sep 2010 11:03:43 +0200 (CEST) Subject: [pypy-svn] r77484 - in pypy/branch/fast-forward/pypy/objspace/std: . test Message-ID: <20100930090343.A8C4C282BE8@codespeak.net> Author: afa Date: Thu Sep 30 11:03:42 2010 New Revision: 77484 Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Log: Add string-like methods to bytearray. Not very efficient: we convert to a string, and then back to bytearray... Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Thu Sep 30 11:03:42 2010 @@ -222,8 +222,90 @@ raise OperationError(space.w_ValueError, space.wrap("bytearray.index(x): x not in bytearray")) +def str_join__Bytearray_ANY(space, w_self, w_list): + list_w = space.listview(w_list) + if not list_w: + return W_BytearrayObject([]) + data = w_self.data + reslen = 0 + for i in range(len(list_w)): + w_s = list_w[i] + if not (space.is_true(space.isinstance(w_s, space.w_str)) or + space.is_true(space.isinstance(w_s, space.w_bytearray))): + raise operationerrfmt( + space.w_TypeError, + "sequence item %d: expected string, %s " + "found", i, space.type(w_s).getname(space, '?')) + reslen += len(space.str_w(w_s)) + newdata = [] + for i in range(len(list_w)): + if data and i != 0: + newdata.extend(data) + newdata.extend(c for c in space.str_w(list_w[i])) + return W_BytearrayObject(newdata) + # These methods could just delegate to the string implementation, # but they have to return a bytearray. 
+def str_replace__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_str1, w_str2, w_max): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "replace", w_str1, w_str2, w_max) + return String2Bytearray(space, w_res) + +def str_upper__Bytearray(space, w_bytearray): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "upper") + return String2Bytearray(space, w_res) + +def str_lower__Bytearray(space, w_bytearray): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "lower") + return String2Bytearray(space, w_res) + +def str_title__Bytearray(space, w_bytearray): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "title") + return String2Bytearray(space, w_res) + +def str_swapcase__Bytearray(space, w_bytearray): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "swapcase") + return String2Bytearray(space, w_res) + +def str_capitalize__Bytearray(space, w_bytearray): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "capitalize") + return String2Bytearray(space, w_res) + +def str_lstrip__Bytearray_ANY(space, w_bytearray, w_chars): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "lstrip", w_chars) + return String2Bytearray(space, w_res) + +def str_rstrip__Bytearray_ANY(space, w_bytearray, w_chars): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "rstrip", w_chars) + return String2Bytearray(space, w_res) + +def str_strip__Bytearray_ANY(space, w_bytearray, w_chars): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "strip", w_chars) + return String2Bytearray(space, w_res) + +def str_ljust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "ljust", w_width, w_fillchar) + return String2Bytearray(space, w_res) + +def str_rjust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "rjust", w_width, w_fillchar) + return String2Bytearray(space, w_res) + +def str_center__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_res = space.call_method(w_str, "center", w_width, w_fillchar) + return String2Bytearray(space, w_res) + def str_zfill__Bytearray_ANY(space, w_bytearray, w_width): w_str = delegate_Bytearray2String(space, w_bytearray) w_res = space.call_method(w_str, "zfill", w_width) @@ -234,5 +316,39 @@ w_res = space.call_method(w_str, "expandtabs", w_tabsize) return String2Bytearray(space, w_res) +def str_split__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_list = space.call_method(w_str, "split", w_by, w_maxsplit) + list_w = space.listview(w_list) + for i in range(len(list_w)): + list_w[i] = String2Bytearray(space, list_w[i]) + return w_list + +def str_rsplit__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_list = space.call_method(w_str, "rsplit", w_by, w_maxsplit) + list_w = space.listview(w_list) + for i in range(len(list_w)): + list_w[i] = String2Bytearray(space, list_w[i]) + return w_list + +def str_partition__Bytearray_ANY(space, 
w_bytearray, w_sub): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_tuple = space.call_method(w_str, "partition", w_sub) + w_a, w_b, w_c = space.fixedview(w_tuple, 3) + return space.newtuple([ + String2Bytearray(space, w_a), + String2Bytearray(space, w_b), + String2Bytearray(space, w_c)]) + +def str_rpartition__Bytearray_ANY(space, w_bytearray, w_sub): + w_str = delegate_Bytearray2String(space, w_bytearray) + w_tuple = space.call_method(w_str, "rpartition", w_sub) + w_a, w_b, w_c = space.fixedview(w_tuple, 3) + return space.newtuple([ + String2Bytearray(space, w_a), + String2Bytearray(space, w_b), + String2Bytearray(space, w_c)]) + from pypy.objspace.std import bytearraytype register_all(vars(), bytearraytype) Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearraytype.py Thu Sep 30 11:03:42 2010 @@ -4,10 +4,15 @@ from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM, no_hash_descr -from pypy.objspace.std.stringtype import str_islower, str_isupper -from pypy.objspace.std.stringtype import str_count, str_index -from pypy.objspace.std.stringtype import str_expandtabs, str_zfill -from pypy.objspace.std.stringtype import str_splitlines +from pypy.objspace.std.stringtype import ( + str_count, str_index, str_rindex, str_find, str_rfind, str_replace, + str_startswith, str_endswith, str_islower, str_isupper, str_isalpha, + str_isalnum, str_isdigit, str_isspace, str_istitle, + str_upper, str_lower, str_title, str_swapcase, str_capitalize, + str_expandtabs, str_lstrip, str_rstrip, str_strip, + str_ljust, str_rjust, str_center, str_zfill, + str_join, str_split, str_rsplit, str_partition, str_rpartition, + str_splitlines) @gateway.unwrap_spec(ObjSpace, W_Root, W_Root, W_Root, W_Root) def descr__new__(space, w_bytearraytype, Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_bytes.py Thu Sep 30 11:03:42 2010 @@ -60,18 +60,64 @@ def test_stringlike_operations(self): assert bytearray('hello').islower() assert bytearray('HELLO').isupper() + assert bytearray('hello').isalpha() + assert not bytearray('hello2').isalpha() + assert bytearray('hello2').isalnum() + assert bytearray('1234').isdigit() + assert bytearray(' ').isspace() + assert bytearray('Abc').istitle() assert bytearray('hello').count('l') == 2 assert bytearray('hello').count(bytearray('l')) == 2 assert bytearray('hello').count(ord('l')) == 2 assert bytearray('hello').index('e') == 1 - assert bytearray('hello').count(bytearray('e')) == 1 + assert bytearray('hello').rindex('l') == 3 + assert bytearray('hello').index(bytearray('e')) == 1 assert bytearray('hello').index(ord('e')) == 1 + assert bytearray('hello').find('l') == 2 + assert bytearray('hello').rfind('l') == 3 - r = bytearray('1').zfill(5) - assert type(r) is bytearray and r == '00001' - r = bytearray('1\t2').expandtabs(5) - assert type(r) is bytearray and r == '1 2' + assert bytearray('hello').startswith('he') + assert bytearray('hello').startswith(bytearray('he')) + assert bytearray('hello').endswith('lo') + assert bytearray('hello').endswith(bytearray('lo')) + def 
test_stringlike_conversions(self): + # methods that should return bytearray (and not str) + def check(result, expected): + assert result == expected + assert type(result) is bytearray + check(bytearray('abc').replace('b', bytearray('d')), 'adc') + + check(bytearray('abc').upper(), 'ABC') + check(bytearray('ABC').lower(), 'abc') + check(bytearray('abc').title(), 'Abc') + check(bytearray('AbC').swapcase(), 'aBc') + check(bytearray('abC').capitalize(), 'Abc') + + check(bytearray('abc').ljust(5), 'abc ') + check(bytearray('abc').rjust(5), ' abc') + check(bytearray('abc').center(5), ' abc ') + check(bytearray('1').zfill(5), '00001') + check(bytearray('1\t2').expandtabs(5), '1 2') + + check(bytearray(',').join(['a', bytearray('b')]), 'a,b') + check(bytearray('abc').lstrip('a'), 'bc') + check(bytearray('abc').rstrip('c'), 'ab') + check(bytearray('aba').strip('a'), 'b') + + def test_split(self): + # methods that should return a sequence of bytearrays + def check(result, expected): + assert result == expected + assert set(type(x) for x in result) == set([bytearray]) + + b = bytearray('mississippi') + check(b.split('i'), [b'm', b'ss', b'ss', b'pp', b'']) + check(b.rsplit('i'), [b'm', b'ss', b'ss', b'pp', b'']) + check(b.rsplit('i', 2), [b'mississ', b'pp', b'']) + + check(b.partition(b'ss'), (b'mi', b'ss', b'issippi')) + check(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi')) From afa at codespeak.net Thu Sep 30 11:31:19 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 30 Sep 2010 11:31:19 +0200 (CEST) Subject: [pypy-svn] r77485 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100930093119.136C2282BE8@codespeak.net> Author: afa Date: Thu Sep 30 11:31:17 2010 New Revision: 77485 Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Log: Translation fixes Modified: pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/bytearrayobject.py Thu Sep 30 11:31:17 2010 @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.inttype import wrapint @@ -241,7 +241,7 @@ for i in range(len(list_w)): if data and i != 0: newdata.extend(data) - newdata.extend(c for c in space.str_w(list_w[i])) + newdata.extend([c for c in space.str_w(list_w[i])]) return W_BytearrayObject(newdata) # These methods could just delegate to the string implementation, From fijal at codespeak.net Thu Sep 30 12:29:22 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 30 Sep 2010 12:29:22 +0200 (CEST) Subject: [pypy-svn] r77486 - pypy/trunk/pypy/jit/metainterp/optimizeopt Message-ID: <20100930102922.E5BB0282BE8@codespeak.net> Author: fijal Date: Thu Sep 30 12:29:21 2010 New Revision: 77486 Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py Log: Disable string ops. 
Breaks tests, let's see if fixes pypy-c-jit issues Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/trunk/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 30 12:29:21 2010 @@ -14,7 +14,7 @@ optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), - OptString(), +# OptString(), OptHeap(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) From fijal at codespeak.net Thu Sep 30 13:19:53 2010 From: fijal at codespeak.net (fijal at codespeak.net) Date: Thu, 30 Sep 2010 13:19:53 +0200 (CEST) Subject: [pypy-svn] r77487 - pypy/branch/jit-profiling Message-ID: <20100930111953.E08C8282BE8@codespeak.net> Author: fijal Date: Thu Sep 30 13:19:52 2010 New Revision: 77487 Added: pypy/branch/jit-profiling/ (props changed) - copied from r77486, pypy/trunk/ Log: More experiments on profiling jitted code From arigo at codespeak.net Thu Sep 30 13:37:37 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 13:37:37 +0200 (CEST) Subject: [pypy-svn] r77488 - pypy/branch/minimark-free/pypy/rpython/lltypesystem Message-ID: <20100930113737.066A5282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 13:37:36 2010 New Revision: 77488 Modified: pypy/branch/minimark-free/pypy/rpython/lltypesystem/rstr.py Log: Merge this from trunk. Modified: pypy/branch/minimark-free/pypy/rpython/lltypesystem/rstr.py ============================================================================== --- pypy/branch/minimark-free/pypy/rpython/lltypesystem/rstr.py (original) +++ pypy/branch/minimark-free/pypy/rpython/lltypesystem/rstr.py Thu Sep 30 13:37:36 2010 @@ -703,6 +703,7 @@ s1.copy_contents(s1, newstr, start, 0, lgt) return newstr _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)' + _ll_stringslice._annenforceargs_ = [None, int, int] def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) From antocuni at codespeak.net Thu Sep 30 14:17:26 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 30 Sep 2010 14:17:26 +0200 (CEST) Subject: [pypy-svn] r77489 - in pypy/branch/jitffi/pypy/jit/backend: llgraph test Message-ID: <20100930121726.D4C66282BE8@codespeak.net> Author: antocuni Date: Thu Sep 30 14:17:25 2010 New Revision: 77489 Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/llimpl.py pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Log: teach the llgraph backend how to CALL c functions. 
This makes test_direct_call.py passing for the first time :-) Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/llimpl.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llgraph/llimpl.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llgraph/llimpl.py Thu Sep 30 14:17:25 2010 @@ -10,7 +10,7 @@ BoxInt, BoxPtr, BoxObj, BoxFloat, REF, INT, FLOAT) from pypy.jit.codewriter import heaptracker -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.module.support import LLSupport, OOSupport from pypy.rpython.llinterp import LLException @@ -305,12 +305,12 @@ loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) -def compile_add_descr(loop, ofs, type): +def compile_add_descr(loop, ofs, type, arg_types): from pypy.jit.backend.llgraph.runner import Descr loop = _from_opaque(loop) op = loop.operations[-1] assert isinstance(type, str) and len(type) == 1 - op.descr = Descr(ofs, type) + op.descr = Descr(ofs, type, arg_types=arg_types) def compile_add_loop_token(loop, descr): if we_are_translated(): @@ -801,7 +801,7 @@ else: raise TypeError(x) try: - return _do_call_common(func, args_in_order) + return _do_call_common(func, args_in_order, calldescr) except LLException, lle: _last_exception = lle d = {'v': None, @@ -1397,10 +1397,26 @@ def do_call_pushfloat(x): _call_args_f.append(x) -def _do_call_common(f, args_in_order=None): +kind2TYPE = { + 'i': lltype.Signed, + 'f': lltype.Float, + 'r': rffi.VOIDP, # XXX this is probably wrong + } + +def _do_call_common(f, args_in_order=None, calldescr=None): ptr = llmemory.cast_int_to_adr(f).ptr - FUNC = lltype.typeOf(ptr).TO - ARGS = FUNC.ARGS + PTR = lltype.typeOf(ptr) + if PTR == rffi.VOIDP: + # it's a pointer to a C function, so we don't have a precise + # signature: create one from the descr + ARGS = map(kind2TYPE.get, calldescr.arg_types) + RESULT = kind2TYPE[calldescr.typeinfo] + FUNC = lltype.FuncType(ARGS, RESULT) + func_to_call = rffi.cast(lltype.Ptr(FUNC), ptr) + else: + FUNC = PTR.TO + ARGS = FUNC.ARGS + func_to_call = ptr._obj._callable args = cast_call_args(ARGS, _call_args_i, _call_args_r, _call_args_f, args_in_order) del _call_args_i[:] @@ -1412,7 +1428,7 @@ result = llinterp.eval_graph(ptr._obj.graph, args) # ^^^ may raise, in which case we get an LLException else: - result = ptr._obj._callable(*args) + result = func_to_call(*args) return result def do_call_void(f): Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py (original) +++ pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py Thu Sep 30 14:17:25 2010 @@ -154,7 +154,7 @@ llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() if isinstance(descr, Descr): - llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo) + llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types) if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP: llimpl.compile_add_loop_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): Modified: pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py (original) +++ 
pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Thu Sep 30 14:17:25 2010 @@ -516,6 +516,23 @@ 'int', descr=calldescr) assert res.value == func_ints(*args) + def test_call_to_c_function(self): + from pypy.rlib.libffi import CDLL, ffi_type_uchar, ffi_type_sint + libc = CDLL('libc.so.6') + c_tolower = libc.getpointer('tolower', [ffi_type_uchar], ffi_type_sint) + c_tolower.push_arg('A') + assert c_tolower.call(lltype.Signed) == ord('a') + + func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = self.cpu.calldescrof_dynamic([ffi_type_uchar], ffi_type_sint) + res = self.execute_operation(rop.CALL, + [funcbox, BoxInt(ord('A'))], + 'int', + descr=calldescr) + assert res.value == ord('a') + + def test_field_basic(self): t_box, T_box = self.alloc_instance(self.T) fielddescr = self.cpu.fielddescrof(self.S, 'value') From antocuni at codespeak.net Thu Sep 30 14:23:14 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 30 Sep 2010 14:23:14 +0200 (CEST) Subject: [pypy-svn] r77490 - pypy/branch/jitffi/pypy/jit/metainterp/test Message-ID: <20100930122314.200E1282BE8@codespeak.net> Author: antocuni Date: Thu Sep 30 14:23:12 2010 New Revision: 77490 Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Log: actually test something Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py ============================================================================== --- pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py (original) +++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_direct_call.py Thu Sep 30 14:23:12 2010 @@ -40,5 +40,14 @@ arg1 = IntArg(1) arg0.next = arg1 n = func.call(arg0, lltype.Signed) + return n - self.meta_interp(f, [0]) + res = self.meta_interp(f, [0]) + assert res == 10 + self.check_loops({ + 'call': 1, + 'guard_no_exception': 1, + 'int_lt': 1, + 'guard_true': 1, + 'jump': 1}) + From arigo at codespeak.net Thu Sep 30 15:09:16 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 15:09:16 +0200 (CEST) Subject: [pypy-svn] r77491 - pypy/branch/minimark-free/pypy/rpython/memory/gc Message-ID: <20100930130916.82A2D282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 15:09:14 2010 New Revision: 77491 Modified: pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py Log: Fix comments. Modified: pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/branch/minimark-free/pypy/rpython/memory/gc/minimarkpage.py Thu Sep 30 15:09:14 2010 @@ -50,12 +50,12 @@ # PAGE_HEADER. The page is on the chained list of pages that still have # room for objects of that size, unless it is completely full. # -# - free. The page is on the chained list of free pages 'freepages' from -# its arena. +# - free: used to be partially full, and is now free again. The page is +# on the chained list of free pages 'freepages' from its arena. -# Each allocated page contains blocks of a given size, which can be in +# Each allocated page contains blocks of a given size, which can again be in # one of three states: allocated, free, or uninitialized. The uninitialized -# blocks (initially all of them) are a tail of the page. +# blocks (initially all of them) are at the tail of the page. 
PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) PAGE_HEADER = lltype.Struct('PageHeader', @@ -214,6 +214,8 @@ if freepages == NULL: # This was the last page, so put the arena away into # arenas_lists[0]. + ll_assert(arena.nfreepages == 0, + "freepages == NULL but nfreepages > 0") arena.nextarena = self.arenas_lists[0] self.arenas_lists[0] = arena self.current_arena = ARENA_NULL @@ -242,7 +244,7 @@ def allocate_new_arena(self): - """Return in self.current_arena the arena to allocate from next.""" + """Loads in self.current_arena the arena to allocate from next.""" # # Pick an arena from 'arenas_lists[i]', with i as small as possible # but > 0. Use caching with 'min_empty_nfreepages', which guarantees @@ -261,6 +263,9 @@ self.min_empty_nfreepages = i # # No more arena with any free page. We must allocate a new arena. + if not we_are_translated(): + for a in self._all_arenas(): + assert a.nfreepages == 0 # # 'arena_base' points to the start of malloced memory; it might not # be a page-aligned address From arigo at codespeak.net Thu Sep 30 15:15:50 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 15:15:50 +0200 (CEST) Subject: [pypy-svn] r77492 - in pypy/trunk/pypy/rpython: lltypesystem memory/gc memory/gc/test Message-ID: <20100930131550.9CB48282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 15:15:49 2010 New Revision: 77492 Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py Log: Merge branch/minimark-free, adding support in the minimark gc to free() the memory used by minimarkpage if possible. That's similar to how CPython's obmalloc.c grew the same capability around Python 2.5. Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py ============================================================================== --- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original) +++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Thu Sep 30 15:15:49 2010 @@ -124,6 +124,9 @@ assert self.usagemap[i] == 'x' self.usagemap[i] = '#' + def mark_freed(self): + self.freed = True # this method is a hook for tests + class fakearenaaddress(llmemory.fakeaddress): def __init__(self, arena, offset): @@ -314,7 +317,7 @@ assert arena_addr.offset == 0 arena_addr.arena.reset(False) assert not arena_addr.arena.objectptrs - arena_addr.arena.freed = True + arena_addr.arena.mark_freed() def arena_reset(arena_addr, size, zero): """Free all objects in the arena, which can then be reused. Modified: pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py Thu Sep 30 15:15:49 2010 @@ -4,15 +4,45 @@ from pypy.rlib.debug import ll_assert WORD = LONG_BIT // 8 -WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] NULL = llmemory.NULL +WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT] +assert 1 << WORD_POWER_2 == WORD -# Terminology: the memory is subdivided into "pages". +# Terminology: the memory is subdivided into "arenas" containing "pages". # A page contains a number of allocated objects, called "blocks". -# The actual allocation occurs in whole arenas, which are subdivided -# into pages. We don't keep track of the arenas. A page can be: +# The actual allocation occurs in whole arenas, which are then subdivided +# into pages. 
For each arena we allocate one of the following structures: + +ARENA_PTR = lltype.Ptr(lltype.ForwardReference()) +ARENA = lltype.Struct('ArenaReference', + # -- The address of the arena, as returned by malloc() + ('base', llmemory.Address), + # -- The number of free and the total number of pages in the arena + ('nfreepages', lltype.Signed), + ('totalpages', lltype.Signed), + # -- A chained list of free pages in the arena. Ends with NULL. + ('freepages', llmemory.Address), + # -- A linked list of arenas. See below. + ('nextarena', ARENA_PTR), + ) +ARENA_PTR.TO.become(ARENA) +ARENA_NULL = lltype.nullptr(ARENA) + +# The idea is that when we need a free page, we take it from the arena +# which currently has the *lowest* number of free pages. This allows +# arenas with a lot of free pages to eventually become entirely free, at +# which point they are returned to the OS. If an arena has a total of +# 64 pages, then we have 64 global lists, arenas_lists[0] to +# arenas_lists[63], such that arenas_lists[i] contains exactly those +# arenas that have 'nfreepages == i'. We allocate pages out of the +# arena in 'current_arena'; when it is exhausted we pick another arena +# with the smallest value for nfreepages (but > 0). + +# ____________________________________________________________ +# +# Each page in an arena can be: # # - uninitialized: never touched so far. # @@ -21,10 +51,11 @@ # room for objects of that size, unless it is completely full. # # - free: used to be partially full, and is now free again. The page is -# on the chained list of free pages. +# on the chained list of free pages 'freepages' from its arena. -# Similarily, each allocated page contains blocks of a given size, which can -# be either uninitialized, allocated or free. +# Each allocated page contains blocks of a given size, which can again be in +# one of three states: allocated, free, or uninitialized. The uninitialized +# blocks (initially all of them) are at the tail of the page. PAGE_PTR = lltype.Ptr(lltype.ForwardReference()) PAGE_HEADER = lltype.Struct('PageHeader', @@ -32,13 +63,16 @@ # pages, it is a chained list of pages having the same size class, # rooted in 'page_for_size[size_class]'. For full pages, it is a # different chained list rooted in 'full_page_for_size[size_class]'. + # For free pages, it is the list 'freepages' in the arena header. ('nextpage', PAGE_PTR), - # -- The number of free blocks, and the number of uninitialized blocks. - # The number of allocated blocks is the rest. - ('nuninitialized', lltype.Signed), + # -- The arena this page is part of. + ('arena', ARENA_PTR), + # -- The number of free blocks. The numbers of uninitialized and + # allocated blocks can be deduced from the context if needed. ('nfree', lltype.Signed), - # -- The chained list of free blocks. If there are none, points to the - # first uninitialized block. + # -- The chained list of free blocks. It ends as a pointer to the + # first uninitialized block (pointing to data that is uninitialized, + # or to the end of the page). 
('freeblock', llmemory.Address), # -- The structure above is 4 words, which is a good value: # '(1024-4) % N' is zero or very small for various small N's, @@ -72,13 +106,35 @@ self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed), length, flavor='raw') self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER)) + assert page_size > self.hdrsize self.nblocks_for_size[0] = 0 # unused for i in range(1, length): self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i) # - self.uninitialized_pages = NULL + self.max_pages_per_arena = arena_size // page_size + self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR), + self.max_pages_per_arena, + flavor='raw', zero=True) + # this is used in mass_free() only + self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR), + self.max_pages_per_arena, + flavor='raw', zero=True) + # + # the arena currently consumed; it must have at least one page + # available, or be NULL. The arena object that we point to is + # not in any 'arenas_lists'. We will consume all its pages before + # we choose a next arena, even if there is a major collection + # in-between. + self.current_arena = ARENA_NULL + # + # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty + self.min_empty_nfreepages = self.max_pages_per_arena + # + # part of current_arena might still contain uninitialized pages self.num_uninitialized_pages = 0 - self.free_pages = NULL + # + # the total memory used, counting every block in use, without + # the additional bookkeeping stuff. self.total_memory_used = r_uint(0) @@ -109,16 +165,12 @@ # else: # The 'result' is part of the uninitialized blocks. - ll_assert(page.nuninitialized > 0, - "fully allocated page found in the page_for_size list") - page.nuninitialized -= 1 - if page.nuninitialized > 0: - freeblock = result + nsize - else: - freeblock = NULL + freeblock = result + nsize # page.freeblock = freeblock - if freeblock == NULL: + # + pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) + if freeblock - pageaddr > self.page_size - nsize: # This was the last free block, so unlink the page from the # chained list and put it in the 'full_page_for_size' list. self.page_for_size[size_class] = page.nextpage @@ -132,37 +184,88 @@ def allocate_new_page(self, size_class): """Allocate and return a new page for the given size_class.""" # - if self.free_pages != NULL: + # Allocate a new arena if needed. + if self.current_arena == ARENA_NULL: + self.allocate_new_arena() + # + # The result is simply 'current_arena.freepages'. + arena = self.current_arena + result = arena.freepages + if arena.nfreepages > 0: + # + # The 'result' was part of the chained list; read the next. + arena.nfreepages -= 1 + freepages = result.address[0] + llarena.arena_reset(result, + llmemory.sizeof(llmemory.Address), + 0) # - # Get the page from the chained list 'free_pages'. - page = self.free_pages - self.free_pages = page.address[0] - llarena.arena_reset(page, llmemory.sizeof(llmemory.Address), 0) else: - # Get the next free page from the uninitialized pages. - if self.num_uninitialized_pages == 0: - self.allocate_new_arena() # Out of memory. Get a new arena. - page = self.uninitialized_pages - self.uninitialized_pages += self.page_size + # The 'result' is part of the uninitialized pages. 
+ ll_assert(self.num_uninitialized_pages > 0, + "fully allocated arena found in self.current_arena") self.num_uninitialized_pages -= 1 + if self.num_uninitialized_pages > 0: + freepages = result + self.page_size + else: + freepages = NULL # - # Initialize the fields of the resulting page - llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER)) - result = llmemory.cast_adr_to_ptr(page, PAGE_PTR) + arena.freepages = freepages + if freepages == NULL: + # This was the last page, so put the arena away into + # arenas_lists[0]. + ll_assert(arena.nfreepages == 0, + "freepages == NULL but nfreepages > 0") + arena.nextarena = self.arenas_lists[0] + self.arenas_lists[0] = arena + self.current_arena = ARENA_NULL # - result.nuninitialized = self.nblocks_for_size[size_class] - result.nfree = 0 - result.freeblock = page + self.hdrsize - result.nextpage = PAGE_NULL + # Initialize the fields of the resulting page + llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER)) + page = llmemory.cast_adr_to_ptr(result, PAGE_PTR) + page.arena = arena + page.nfree = 0 + page.freeblock = result + self.hdrsize + page.nextpage = PAGE_NULL ll_assert(self.page_for_size[size_class] == PAGE_NULL, "allocate_new_page() called but a page is already waiting") - self.page_for_size[size_class] = result - return result + self.page_for_size[size_class] = page + return page + + + def _all_arenas(self): + """For testing. Enumerates all arenas.""" + if self.current_arena: + yield self.current_arena + for arena in self.arenas_lists: + while arena: + yield arena + arena = arena.nextarena def allocate_new_arena(self): - ll_assert(self.num_uninitialized_pages == 0, - "some uninitialized pages are already waiting") + """Loads in self.current_arena the arena to allocate from next.""" + # + # Pick an arena from 'arenas_lists[i]', with i as small as possible + # but > 0. Use caching with 'min_empty_nfreepages', which guarantees + # that 'arenas_lists[1:min_empty_nfreepages]' are all empty. + i = self.min_empty_nfreepages + while i < self.max_pages_per_arena: + # + if self.arenas_lists[i] != ARENA_NULL: + # + # Found it. + self.current_arena = self.arenas_lists[i] + self.arenas_lists[i] = self.current_arena.nextarena + return + # + i += 1 + self.min_empty_nfreepages = i + # + # No more arena with any free page. We must allocate a new arena. + if not we_are_translated(): + for a in self._all_arenas(): + assert a.nfreepages == 0 # # 'arena_base' points to the start of malloced memory; it might not # be a page-aligned address @@ -177,13 +280,15 @@ # 'npages' is the number of full pages just allocated npages = (arena_end - firstpage) // self.page_size # - # add these pages to the list - self.uninitialized_pages = firstpage + # Allocate an ARENA object and initialize it + arena = lltype.malloc(ARENA, flavor='raw') + arena.base = arena_base + arena.nfreepages = 0 # they are all uninitialized pages + arena.totalpages = npages + arena.freepages = firstpage self.num_uninitialized_pages = npages + self.current_arena = arena # - # increase a bit arena_size for the next time - self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1) - self.arena_size = (self.arena_size // self.page_size) * self.page_size allocate_new_arena._dont_inline_ = True @@ -199,16 +304,51 @@ # # Walk the pages in 'page_for_size[size_class]' and # 'full_page_for_size[size_class]' and free some objects. - # Pages completely freed are added to 'self.free_pages', and - # become available for reuse by any size class. 
Pages not - # completely freed are re-chained either in + # Pages completely freed are added to 'page.arena.freepages', + # and become available for reuse by any size class. Pages + # not completely freed are re-chained either in # 'full_page_for_size[]' or 'page_for_size[]'. - self.mass_free_in_page(size_class, ok_to_free_func) + self.mass_free_in_pages(size_class, ok_to_free_func) # size_class -= 1 + # + # Rehash arenas into the correct arenas_lists[i]. If + # 'self.current_arena' contains an arena too, it remains there. + (self.old_arenas_lists, self.arenas_lists) = ( + self.arenas_lists, self.old_arenas_lists) + # + i = 0 + while i < self.max_pages_per_arena: + self.arenas_lists[i] = ARENA_NULL + i += 1 + # + i = 0 + while i < self.max_pages_per_arena: + arena = self.old_arenas_lists[i] + while arena != ARENA_NULL: + nextarena = arena.nextarena + # + if arena.nfreepages == arena.totalpages: + # + # The whole arena is empty. Free it. + llarena.arena_free(arena.base) + lltype.free(arena, flavor='raw') + # + else: + # Insert 'arena' in the correct arenas_lists[n] + n = arena.nfreepages + ll_assert(n < self.max_pages_per_arena, + "totalpages != nfreepages >= max_pages_per_arena") + arena.nextarena = self.arenas_lists[n] + self.arenas_lists[n] = arena + # + arena = nextarena + i += 1 + # + self.min_empty_nfreepages = 1 - def mass_free_in_page(self, size_class, ok_to_free_func): + def mass_free_in_pages(self, size_class, ok_to_free_func): nblocks = self.nblocks_for_size[size_class] block_size = size_class * WORD remaining_partial_pages = PAGE_NULL @@ -224,8 +364,7 @@ while page != PAGE_NULL: # # Collect the page. - surviving = self.walk_page(page, block_size, - nblocks, ok_to_free_func) + surviving = self.walk_page(page, block_size, ok_to_free_func) nextpage = page.nextpage # if surviving == nblocks: @@ -259,19 +398,23 @@ def free_page(self, page): """Free a whole page.""" # - # Done by inserting it in the 'free_pages' list. + # Insert the freed page in the arena's 'freepages' list. + # If nfreepages == totalpages, then it will be freed at the + # end of mass_free(). + arena = page.arena + arena.nfreepages += 1 pageaddr = llmemory.cast_ptr_to_adr(page) pageaddr = llarena.getfakearenaaddress(pageaddr) llarena.arena_reset(pageaddr, self.page_size, 0) llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) - pageaddr.address[0] = self.free_pages - self.free_pages = pageaddr + pageaddr.address[0] = arena.freepages + arena.freepages = pageaddr - def walk_page(self, page, block_size, nblocks, ok_to_free_func): + def walk_page(self, page, block_size, ok_to_free_func): """Walk over all objects in a page, and ask ok_to_free_func().""" # - # 'freeblock' is the next free block, or NULL if there isn't any more. + # 'freeblock' is the next free block freeblock = page.freeblock # # 'prevfreeblockat' is the address of where 'freeblock' was read from. @@ -281,22 +424,28 @@ obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) obj += self.hdrsize surviving = 0 # initially + skip_free_blocks = page.nfree # - nblocks -= page.nuninitialized - index = nblocks - while index > 0: + while True: # if obj == freeblock: # + if skip_free_blocks == 0: + # + # 'obj' points to the first uninitialized block, + # or to the end of the page if there are none. + break + # # 'obj' points to a free block. It means that # 'prevfreeblockat.address[0]' does not need to be updated. # Just read the next free block from 'obj.address[0]'. 
+ skip_free_blocks -= 1 prevfreeblockat = obj freeblock = obj.address[0] # else: # 'obj' points to a valid object. - ll_assert(not freeblock or freeblock > obj, + ll_assert(freeblock > obj, "freeblocks are linked out of order") # if ok_to_free_func(obj): @@ -310,15 +459,14 @@ prevfreeblockat = obj obj.address[0] = freeblock # + # Update the number of free objects in the page. + page.nfree += 1 + # else: # The object survives. surviving += 1 # obj += block_size - index -= 1 - # - # Update the number of free objects in the page. - page.nfree = nblocks - surviving # # Update the global total size of objects. self.total_memory_used += surviving * block_size @@ -327,6 +475,20 @@ return surviving + def _nuninitialized(self, page, size_class): + # Helper for debugging: count the number of uninitialized blocks + freeblock = page.freeblock + for i in range(page.nfree): + freeblock = freeblock.address[0] + assert freeblock != NULL + pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) + num_initialized_blocks, rem = divmod( + freeblock - pageaddr - self.hdrsize, size_class * WORD) + assert rem == 0, "page size_class misspecified?" + nblocks = self.nblocks_for_size[size_class] + return nblocks - num_initialized_blocks + + # ____________________________________________________________ # Helpers to go from a pointer to the start of its page Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py ============================================================================== --- pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py (original) +++ pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py Thu Sep 30 15:15:49 2010 @@ -12,17 +12,19 @@ def test_allocate_arena(): - ac = ArenaCollection(SHIFT + 16*20, 16, 1) + ac = ArenaCollection(SHIFT + 64*20, 64, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 16*20 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1") + upages = ac.current_arena.freepages + upages + 64*20 # does not raise + py.test.raises(llarena.ArenaError, "upages + 64*20 + 1") # - ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1) + ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1) ac.allocate_new_arena() assert ac.num_uninitialized_pages == 20 - ac.uninitialized_pages + 16*20 + 7 # does not raise - py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16") + upages = ac.current_arena.freepages + upages + 64*20 + 7 # does not raise + py.test.raises(llarena.ArenaError, "upages + 64*20 + 64") def test_allocate_new_page(): @@ -31,7 +33,8 @@ # def checknewpage(page, size_class): size = WORD * size_class - assert page.nuninitialized == (pagesize - hdrsize) // size + assert (ac._nuninitialized(page, size_class) == + (pagesize - hdrsize) // size) assert page.nfree == 0 page1 = page.freeblock - hdrsize assert llmemory.cast_ptr_to_adr(page) == page1 @@ -44,13 +47,13 @@ page = ac.allocate_new_page(5) checknewpage(page, 5) assert ac.num_uninitialized_pages == 2 - assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) + assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[5] == page # page = ac.allocate_new_page(3) checknewpage(page, 3) assert ac.num_uninitialized_pages == 1 - assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page) + assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page) assert ac.page_for_size[3] == page # page = ac.allocate_new_page(4) @@ -71,17 +74,17 @@ page = 
llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR) if step == 1: page.nfree = 0 - page.nuninitialized = nblocks - nusedblocks + nuninitialized = nblocks - nusedblocks else: page.nfree = nusedblocks - page.nuninitialized = nblocks - 2*nusedblocks + nuninitialized = nblocks - 2*nusedblocks + page.freeblock = pageaddr + hdrsize + nusedblocks * size_block if nusedblocks < nblocks: - page.freeblock = pageaddr + hdrsize + nusedblocks * size_block chainedlists = ac.page_for_size else: - page.freeblock = NULL chainedlists = ac.full_page_for_size page.nextpage = chainedlists[size_class] + page.arena = ac.current_arena chainedlists[size_class] = page if fill_with_objects: for i in range(0, nusedblocks*step, step): @@ -98,11 +101,15 @@ prev = 'prevhole.address[0]' endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block exec '%s = endaddr' % prev in globals(), locals() + assert ac._nuninitialized(page, size_class) == nuninitialized # ac.allocate_new_arena() num_initialized_pages = len(pagelayout.rstrip(" ")) - ac._startpageaddr = ac.uninitialized_pages - ac.uninitialized_pages += pagesize * num_initialized_pages + ac._startpageaddr = ac.current_arena.freepages + if pagelayout.endswith(" "): + ac.current_arena.freepages += pagesize * num_initialized_pages + else: + ac.current_arena.freepages = NULL ac.num_uninitialized_pages -= num_initialized_pages # for i in reversed(range(num_initialized_pages)): @@ -115,8 +122,9 @@ link(pageaddr, size_class, size_block, nblocks, nblocks-1) elif c == '.': # a free, but initialized, page llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address)) - pageaddr.address[0] = ac.free_pages - ac.free_pages = pageaddr + pageaddr.address[0] = ac.current_arena.freepages + ac.current_arena.freepages = pageaddr + ac.current_arena.nfreepages += 1 elif c == '#': # a random full page, in the list 'full_pages' size_class = fill_with_objects or 1 size_block = WORD * size_class @@ -142,26 +150,29 @@ def checkpage(ac, page, expected_position): assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position) +def freepages(ac): + return ac.current_arena.freepages + def test_simple_arena_collection(): pagesize = hdrsize + 16 ac = arena_collection_for_test(pagesize, "##....# ") # - assert ac.free_pages == pagenum(ac, 2) + assert freepages(ac) == pagenum(ac, 2) page = ac.allocate_new_page(1); checkpage(ac, page, 2) - assert ac.free_pages == pagenum(ac, 3) + assert freepages(ac) == pagenum(ac, 3) page = ac.allocate_new_page(2); checkpage(ac, page, 3) - assert ac.free_pages == pagenum(ac, 4) + assert freepages(ac) == pagenum(ac, 4) page = ac.allocate_new_page(3); checkpage(ac, page, 4) - assert ac.free_pages == pagenum(ac, 5) + assert freepages(ac) == pagenum(ac, 5) page = ac.allocate_new_page(4); checkpage(ac, page, 5) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 3 + assert freepages(ac) == pagenum(ac, 7) and ac.num_uninitialized_pages == 3 page = ac.allocate_new_page(5); checkpage(ac, page, 7) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 2 + assert freepages(ac) == pagenum(ac, 8) and ac.num_uninitialized_pages == 2 page = ac.allocate_new_page(6); checkpage(ac, page, 8) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 1 + assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1 page = ac.allocate_new_page(7); checkpage(ac, page, 9) - assert ac.free_pages == NULL and ac.num_uninitialized_pages == 0 + assert not ac.current_arena and ac.num_uninitialized_pages == 0 def chkob(ac, num_page, pos_obj, obj): @@ 
-205,47 +216,47 @@ ac = arena_collection_for_test(pagesize, "/.", fill_with_objects=2) page = getpage(ac, 0) assert page.nfree == 3 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 2*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 2*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 0, 6*WORD, obj) assert page.nfree == 1 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 10*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj) assert page.nfree == 0 - assert page.nuninitialized == 3 + assert ac._nuninitialized(page, 2) == 3 chkob(ac, 0, 12*WORD, page.freeblock) # obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj) - assert page.nuninitialized == 2 + assert ac._nuninitialized(page, 2) == 2 obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj) obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj) assert page.nfree == 0 - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 obj = ac.malloc(2*WORD); chkob(ac, 1, 0*WORD, obj) def test_malloc_new_arena(): pagesize = hdrsize + 7*WORD ac = arena_collection_for_test(pagesize, "### ") + arena_size = ac.arena_size obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 2 # del ac.allocate_new_arena # restore the one from the class - arena_size = ac.arena_size obj = ac.malloc(3*WORD) # need a new arena assert ac.num_uninitialized_pages == (arena_size // ac.page_size - - 1 # for start_of_page() - 1 # the just-allocated page ) class OkToFree(object): - def __init__(self, ac, answer): + def __init__(self, ac, answer, multiarenas=False): assert callable(answer) or 0.0 <= answer <= 1.0 self.ac = ac self.answer = answer + self.multiarenas = multiarenas self.lastnum = 0.0 self.seen = {} @@ -257,7 +268,10 @@ ok_to_free = self.lastnum >= 1.0 if ok_to_free: self.lastnum -= 1.0 - key = addr - self.ac._startpageaddr + if self.multiarenas: + key = (addr.arena, addr.offset) + else: + key = addr - self.ac._startpageaddr assert key not in self.seen self.seen[key] = ok_to_free return ok_to_free @@ -272,10 +286,10 @@ page = getpage(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 1 + assert ac._nuninitialized(page, 2) == 1 assert page.nfree == 0 chkob(ac, 0, 4*WORD, page.freeblock) - assert ac.free_pages == NULL + assert freepages(ac) == NULL def test_mass_free_emptied_page(): pagesize = hdrsize + 7*WORD @@ -285,7 +299,7 @@ assert ok_to_free.seen == {hdrsize + 0*WORD: True, hdrsize + 2*WORD: True} pageaddr = pagenum(ac, 0) - assert pageaddr == ac.free_pages + assert pageaddr == freepages(ac) assert pageaddr.address[0] == NULL assert ac.page_for_size[2] == PAGE_NULL @@ -300,10 +314,9 @@ page = getpage(ac, 0) assert page == ac.full_page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 assert page.nfree == 0 - assert page.freeblock == NULL - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.page_for_size[2] == PAGE_NULL def test_mass_free_full_is_partially_emptied(): @@ -319,19 +332,19 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 0 + assert ac._nuninitialized(page, 2) == 0 assert page.nfree == 2 assert page.freeblock == pageaddr + hdrsize + 2*WORD assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD - assert page.freeblock.address[0].address[0] == NULL - assert ac.free_pages == NULL + assert 
page.freeblock.address[0].address[0] == pageaddr + hdrsize + 8*WORD + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL def test_mass_free_half_page_remains(): pagesize = hdrsize + 24*WORD ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) page = getpage(ac, 0) - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 # ok_to_free = OkToFree(ac, False) @@ -344,7 +357,7 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 assert page.freeblock == pageaddr + hdrsize + 2*WORD assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD @@ -352,14 +365,14 @@ pageaddr + hdrsize + 10*WORD assert page.freeblock.address[0].address[0].address[0] == \ pageaddr + hdrsize + 14*WORD - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL def test_mass_free_half_page_becomes_more_free(): pagesize = hdrsize + 24*WORD ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2) page = getpage(ac, 0) - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 4 # ok_to_free = OkToFree(ac, 0.5) @@ -372,7 +385,7 @@ pageaddr = pagenum(ac, 0) assert page == ac.page_for_size[2] assert page.nextpage == PAGE_NULL - assert page.nuninitialized == 4 + assert ac._nuninitialized(page, 2) == 4 assert page.nfree == 6 fb = page.freeblock assert fb == pageaddr + hdrsize + 2*WORD @@ -384,7 +397,7 @@ pageaddr + hdrsize + 12*WORD assert fb.address[0].address[0].address[0].address[0].address[0] == \ pageaddr + hdrsize + 14*WORD - assert ac.free_pages == NULL + assert freepages(ac) == NULL assert ac.full_page_for_size[2] == PAGE_NULL # ____________________________________________________________ @@ -392,17 +405,29 @@ def test_random(): import random pagesize = hdrsize + 24*WORD - num_pages = 28 + num_pages = 3 ac = arena_collection_for_test(pagesize, " " * num_pages) live_objects = {} # - # Run the test until ac.allocate_new_arena() is called. + # Run the test until three arenas are freed. This is a quick test + # that the arenas are really freed by the logic. class DoneTesting(Exception): - pass - def done_testing(): - raise DoneTesting - ac.allocate_new_arena = done_testing - # + counter = 0 + def my_allocate_new_arena(): + # the following output looks cool on a 112-character-wide terminal. 
+ lst = sorted(ac._all_arenas(), key=lambda a: a.base.arena._arena_index) + for a in lst: + print a.base.arena, a.base.arena.usagemap + print '-' * 80 + ac.__class__.allocate_new_arena(ac) + a = ac.current_arena.base.arena + def my_mark_freed(): + a.freed = True + DoneTesting.counter += 1 + if DoneTesting.counter > 3: + raise DoneTesting + a.mark_freed = my_mark_freed + ac.allocate_new_arena = my_allocate_new_arena try: while True: # @@ -410,12 +435,13 @@ for i in range(random.randrange(50, 100)): size_class = random.randrange(1, 7) obj = ac.malloc(size_class * WORD) - at = obj - ac._startpageaddr + at = (obj.arena, obj.offset) assert at not in live_objects live_objects[at] = size_class * WORD # # Free half the objects, randomly - ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5) + ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5, + multiarenas=True) ac.mass_free(ok_to_free) # # Check that we have seen all objects @@ -428,5 +454,4 @@ surviving_total_size += live_objects[at] assert ac.total_memory_used == surviving_total_size except DoneTesting: - # the following output looks cool on a 112-character-wide terminal. - print ac._startpageaddr.arena.usagemap + pass From arigo at codespeak.net Thu Sep 30 15:16:10 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 15:16:10 +0200 (CEST) Subject: [pypy-svn] r77493 - pypy/branch/minimark-free Message-ID: <20100930131610.840EB282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 15:16:08 2010 New Revision: 77493 Removed: pypy/branch/minimark-free/ Log: Remove merged branch. From arigo at codespeak.net Thu Sep 30 15:18:29 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 15:18:29 +0200 (CEST) Subject: [pypy-svn] r77494 - pypy/branch/jit-str-fix Message-ID: <20100930131829.6D98A282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 15:18:28 2010 New Revision: 77494 Added: pypy/branch/jit-str-fix/ - copied from r77493, pypy/trunk/ Log: A branch in which to try to fix the issues shown by the merge of the jit-str branch. From afa at codespeak.net Thu Sep 30 15:43:47 2010 From: afa at codespeak.net (afa at codespeak.net) Date: Thu, 30 Sep 2010 15:43:47 +0200 (CEST) Subject: [pypy-svn] r77495 - pypy/branch/fast-forward/lib-python Message-ID: <20100930134347.955E7282BE8@codespeak.net> Author: afa Date: Thu Sep 30 15:43:46 2010 New Revision: 77495 Modified: pypy/branch/fast-forward/lib-python/TODO Log: Update 2.7 TODO list Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Thu Sep 30 15:43:46 2010 @@ -24,6 +24,8 @@ @test_support.cpython_only +- (list|str|unicode|bytearray).(index|find) should accept None as indices + Medium tasks ------------ @@ -49,3 +51,7 @@ - In socket.py, """The implementation currently relies on reference counting to close the underlying socket object.""" +- Implement an after-fork hook (See PyOS_AfterFork in CPython) to clear thread + state in a forked interpreter. + Then unskip test_3_join_in_forked_from_thread() in test_threading.py. 
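The new (index|find) item in the TODO diff above refers to CPython 2.7 behaviour in which None is accepted as an explicit "use the default" bound. A rough illustration for plain strings only (assuming CPython 2.7 semantics; the list and bytearray cases named in the TODO item follow the same idea):

    # CPython 2.7 treats None the same as omitting the bound:
    assert "abcdef".find("cd", None) == 2
    assert "abcdef".index("cd", None, None) == 2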
+ From antocuni at codespeak.net Thu Sep 30 15:58:23 2010 From: antocuni at codespeak.net (antocuni at codespeak.net) Date: Thu, 30 Sep 2010 15:58:23 +0200 (CEST) Subject: [pypy-svn] r77496 - pypy/branch/jitffi/pypy/rlib Message-ID: <20100930135823.2047E282BE8@codespeak.net> Author: antocuni Date: Thu Sep 30 15:58:21 2010 New Revision: 77496 Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py Log: fix potential memory leak Modified: pypy/branch/jitffi/pypy/rlib/clibffi.py ============================================================================== --- pypy/branch/jitffi/pypy/rlib/clibffi.py (original) +++ pypy/branch/jitffi/pypy/rlib/clibffi.py Thu Sep 30 15:58:21 2010 @@ -558,8 +558,10 @@ """Load the library, or raises DLOpenError.""" self.lib = lltype.nullptr(rffi.CCHARP.TO) ll_libname = rffi.str2charp(libname) - self.lib = dlopen(ll_libname) - lltype.free(ll_libname, flavor='raw') + try: + self.lib = dlopen(ll_libname) + finally: + lltype.free(ll_libname, flavor='raw') def __del__(self): if self.lib: From arigo at codespeak.net Thu Sep 30 17:04:25 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 17:04:25 +0200 (CEST) Subject: [pypy-svn] r77497 - pypy/branch/jit-str-fix/pypy/jit/metainterp/optimizeopt Message-ID: <20100930150425.6F9E6282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 17:04:23 2010 New Revision: 77497 Modified: pypy/branch/jit-str-fix/pypy/jit/metainterp/optimizeopt/__init__.py Log: Re-enable this. Modified: pypy/branch/jit-str-fix/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 30 17:04:23 2010 @@ -14,7 +14,7 @@ optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), -# OptString(), + OptString(), OptHeap(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals) From arigo at codespeak.net Thu Sep 30 17:05:07 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 17:05:07 +0200 (CEST) Subject: [pypy-svn] r77498 - in pypy/branch/jit-str-fix/pypy: jit/metainterp jit/tl translator translator/goal Message-ID: <20100930150507.38B81282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 17:05:05 2010 New Revision: 77498 Added: pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py Modified: pypy/branch/jit-str-fix/pypy/jit/metainterp/warmspot.py pypy/branch/jit-str-fix/pypy/translator/driver.py pypy/branch/jit-str-fix/pypy/translator/goal/translate.py Log: Add the target --jittest to translate.py. Works like pypyjit.py, but for any jit target. 
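The try/finally idiom from the clibffi fix above (r77496) is the general pattern for raw, GC-invisible allocations such as str2charp() buffers: release them on every path, including when the wrapped call raises. A minimal sketch, with an illustrative helper name:

    from pypy.rpython.lltypesystem import rffi, lltype

    def with_charp(s, func):
        # rffi.str2charp() mallocs a raw C string the GC never tracks,
        # so it must be freed even if func() raises.
        ll_s = rffi.str2charp(s)
        try:
            return func(ll_s)
        finally:
            lltype.free(ll_s, flavor='raw')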
Modified: pypy/branch/jit-str-fix/pypy/jit/metainterp/warmspot.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/metainterp/warmspot.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/metainterp/warmspot.py Thu Sep 30 17:05:05 2010 @@ -67,9 +67,16 @@ def jittify_and_run(interp, graph, args, repeat=1, backendopt=False, trace_limit=sys.maxint, debug_level=DEBUG_STEPS, inline=False, **kwds): + from pypy.config.config import ConfigError translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - translator.config.translation.list_comprehension_operations = True + try: + translator.config.translation.gc = "boehm" + except ConfigError: + pass + try: + translator.config.translation.list_comprehension_operations = True + except ConfigError: + pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: jd.warmstate.set_param_threshold(3) # for tests Added: pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py ============================================================================== --- (empty file) +++ pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py Thu Sep 30 17:05:05 2010 @@ -0,0 +1,43 @@ +""" +This file is imported by pypy.translation.driver when running the +target --jittest. Feel free to hack it as needed; it is imported +only after the '---> Checkpoint' fork. +""" + +from pypy.conftest import option +from pypy.rpython.lltypesystem import lltype +from pypy.rpython.llinterp import LLInterpreter +from pypy.rpython.annlowlevel import llstr +from pypy.jit.metainterp import warmspot +from pypy.rlib.jit import OPTIMIZER_FULL + + +ARGS = ["jittest", "10000"] + + +def jittest(driver): + graph = driver.translator.graphs[0] + interp = LLInterpreter(driver.translator.rtyper, malloc_check=False) + + def returns_null(T, *args, **kwds): + return lltype.nullptr(T) + interp.heap.malloc_nonmovable = returns_null # XXX + + get_policy = driver.extra['jitpolicy'] + jitpolicy = get_policy(driver) + + from pypy.jit.backend.llgraph.runner import LLtypeCPU + apply_jit(jitpolicy, interp, graph, LLtypeCPU) + + +def apply_jit(policy, interp, graph, CPUClass): + print 'warmspot.jittify_and_run() started...' 
+ option.view = True + LIST = graph.getargs()[0].concretetype + lst = LIST.TO.ll_newlist(len(ARGS)) + for i, arg in enumerate(ARGS): + lst.ll_setitem_fast(i, llstr(arg)) + warmspot.jittify_and_run(interp, graph, [lst], policy=policy, + listops=True, CPUClass=CPUClass, + backendopt=True, inline=True, + optimizer=OPTIMIZER_FULL) Modified: pypy/branch/jit-str-fix/pypy/translator/driver.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/translator/driver.py (original) +++ pypy/branch/jit-str-fix/pypy/translator/driver.py Thu Sep 30 17:05:05 2010 @@ -426,6 +426,22 @@ [OOTYPE], "JIT compiler generation") + def task_jittest_lltype(self): + """ Run with the JIT on top of the llgraph backend + """ + # parent process loop: spawn a child, wait for the child to finish, + # print a message, and restart + from pypy.translator.goal import unixcheckpoint + unixcheckpoint.restartable_point(auto='run') + # load the module pypy/jit/tl/jittest.py, which you can hack at + # and restart without needing to restart the whole translation process + from pypy.jit.tl import jittest + jittest.jittest(self) + # + task_jittest_lltype = taskdef(task_jittest_lltype, + [RTYPE], + "test of the JIT on the llgraph backend") + def task_backendopt_lltype(self): """ Run all backend optimizations - lltype version """ @@ -433,7 +449,8 @@ backend_optimizations(self.translator) # task_backendopt_lltype = taskdef(task_backendopt_lltype, - [RTYPE, '??pyjitpl_lltype'], + [RTYPE, '??pyjitpl_lltype', + '??jittest_lltype'], "lltype back-end optimisations") BACKENDOPT = 'backendopt_lltype' Modified: pypy/branch/jit-str-fix/pypy/translator/goal/translate.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/translator/goal/translate.py (original) +++ pypy/branch/jit-str-fix/pypy/translator/goal/translate.py Thu Sep 30 17:05:05 2010 @@ -27,6 +27,7 @@ ("annotate", "do type inference", "-a --annotate", ""), ("rtype", "do rtyping", "-t --rtype", ""), ("pyjitpl", "JIT generation step", "--pyjitpl", ""), + ("jittest", "JIT test with llgraph backend", "--jittest", ""), ("backendopt", "do backend optimizations", "--backendopt", ""), ("source", "create source", "-s --source", ""), ("compile", "compile", "-c --compile", " (default goal)"), From arigo at codespeak.net Thu Sep 30 17:32:44 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 17:32:44 +0200 (CEST) Subject: [pypy-svn] r77499 - in pypy/branch/jit-str-fix/pypy/jit: backend/test metainterp Message-ID: <20100930153244.88454282BE8@codespeak.net> Author: arigo Date: Thu Sep 30 17:32:43 2010 New Revision: 77499 Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py pypy/branch/jit-str-fix/pypy/jit/metainterp/executor.py Log: Add COPYSTRCONTENT and COPYUNICODECONTENT to the operations generated by test_ll_random. Fix in metainterp/executor to support this. 
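Concretely, COPYSTRCONTENT(src, dst, srcstart, dststart, length) copies length characters out of one string buffer into another at the given offsets. A pure-Python sketch of the semantics, ignoring the low-level rstr representation:

    def copystrcontent(src, dst, srcstart, dststart, length):
        # 'dst' stands in for the mutable chars array of the target string
        for i in range(length):
            dst[dststart + i] = src[srcstart + i]

    buf = list("xxxxxx")
    copystrcontent("hello", buf, 1, 2, 3)   # copies "ell" into buf[2:5]
    assert "".join(buf) == "xxellx"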
Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py Thu Sep 30 17:32:43 2010 @@ -386,6 +386,18 @@ v_string = self.get_string(builder, r) builder.do(self.opnum, [v_string]) +class AbstractCopyContentOperation(AbstractStringOperation): + def produce_into(self, builder, r): + v_srcstring = self.get_string(builder, r) + v_dststring = self.get_string(builder, r) + srclen = len(v_srcstring.getref(self.ptr).chars) + dstlen = len(v_dststring.getref(self.ptr).chars) + v_length = builder.get_index(min(srclen, dstlen), r) + v_srcstart = builder.get_index(srclen - v_length.value + 1, r) + v_dststart = builder.get_index(dstlen - v_length.value + 1, r) + builder.do(self.opnum, [v_srcstring, v_dststring, + v_srcstart, v_dststart, v_length]) + class StrGetItemOperation(AbstractGetItemOperation, _StrOperation): pass @@ -404,6 +416,13 @@ class UnicodeLenOperation(AbstractStringLenOperation, _UnicodeOperation): pass +class CopyStrContentOperation(AbstractCopyContentOperation, _StrOperation): + pass + +class CopyUnicodeContentOperation(AbstractCopyContentOperation, + _UnicodeOperation): + pass + # there are five options in total: # 1. non raising call and guard_no_exception @@ -577,6 +596,8 @@ OPERATIONS.append(UnicodeSetItemOperation(rop.UNICODESETITEM)) OPERATIONS.append(StrLenOperation(rop.STRLEN)) OPERATIONS.append(UnicodeLenOperation(rop.UNICODELEN)) + OPERATIONS.append(CopyStrContentOperation(rop.COPYSTRCONTENT)) + OPERATIONS.append(CopyUnicodeContentOperation(rop.COPYUNICODECONTENT)) for i in range(2): OPERATIONS.append(GuardClassOperation(rop.GUARD_CLASS)) Modified: pypy/branch/jit-str-fix/pypy/jit/metainterp/executor.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/metainterp/executor.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/metainterp/executor.py Thu Sep 30 17:32:43 2010 @@ -205,8 +205,8 @@ def do_copystrcontent(cpu, _, srcbox, dstbox, srcstartbox, dststartbox, lengthbox): - src = srcbox.getptr(lltype.Ptr(rstr.STR)) - dst = dstbox.getptr(lltype.Ptr(rstr.STR)) + src = srcbox.getref(lltype.Ptr(rstr.STR)) + dst = dstbox.getref(lltype.Ptr(rstr.STR)) srcstart = srcstartbox.getint() dststart = dststartbox.getint() length = lengthbox.getint() @@ -214,8 +214,8 @@ def do_copyunicodecontent(cpu, _, srcbox, dstbox, srcstartbox, dststartbox, lengthbox): - src = srcbox.getptr(lltype.Ptr(rstr.UNICODE)) - dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE)) + src = srcbox.getref(lltype.Ptr(rstr.UNICODE)) + dst = dstbox.getref(lltype.Ptr(rstr.UNICODE)) srcstart = srcstartbox.getint() dststart = dststartbox.getint() length = lengthbox.getint() @@ -428,6 +428,10 @@ if arity == 3: func = get_execute_funclist(3, False)[opnum] return func(cpu, metainterp, argboxes[0], argboxes[1], argboxes[2]) + if arity == 5: # copystrcontent, copyunicodecontent + func = get_execute_funclist(5, False)[opnum] + return func(cpu, metainterp, argboxes[0], argboxes[1], + argboxes[2], argboxes[3], argboxes[4]) raise NotImplementedError From arigo at codespeak.net Thu Sep 30 17:33:24 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 17:33:24 +0200 (CEST) Subject: [pypy-svn] r77500 - pypy/branch/jit-str-fix/pypy/jit/backend/test Message-ID: <20100930153324.B96D3282BE8@codespeak.net> Author: arigo 
Date: Thu Sep 30 17:33:23 2010 New Revision: 77500 Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py Log: COPYUNICODECONTENT is actually not used and not implemented by the x86 backend so far. Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py Thu Sep 30 17:33:23 2010 @@ -597,7 +597,7 @@ OPERATIONS.append(StrLenOperation(rop.STRLEN)) OPERATIONS.append(UnicodeLenOperation(rop.UNICODELEN)) OPERATIONS.append(CopyStrContentOperation(rop.COPYSTRCONTENT)) - OPERATIONS.append(CopyUnicodeContentOperation(rop.COPYUNICODECONTENT)) + #OPERATIONS.append(CopyUnicodeContentOperation(rop.COPYUNICODECONTENT)) for i in range(2): OPERATIONS.append(GuardClassOperation(rop.GUARD_CLASS)) From david at codespeak.net Thu Sep 30 18:05:57 2010 From: david at codespeak.net (david at codespeak.net) Date: Thu, 30 Sep 2010 18:05:57 +0200 (CEST) Subject: [pypy-svn] r77501 - pypy/branch/arm-backend Message-ID: <20100930160557.E777C282BE8@codespeak.net> Author: david Date: Thu Sep 30 18:05:56 2010 New Revision: 77501 Added: pypy/branch/arm-backend/ (props changed) - copied from r77500, pypy/trunk/ Log: Start arm backend branch From hakanardo at codespeak.net Thu Sep 30 18:32:40 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 30 Sep 2010 18:32:40 +0200 (CEST) Subject: [pypy-svn] r77502 - pypy/branch/jit-bounds Message-ID: <20100930163240.259F0282BE8@codespeak.net> Author: hakanardo Date: Thu Sep 30 18:32:38 2010 New Revision: 77502 Removed: pypy/branch/jit-bounds/ Log: Removed merged branch From arigo at codespeak.net Thu Sep 30 18:43:42 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 18:43:42 +0200 (CEST) Subject: [pypy-svn] r77503 - in pypy/branch/jit-str-fix/pypy/jit/backend: . test x86 Message-ID: <20100930164342.F2760282BDB@codespeak.net> Author: arigo Date: Thu Sep 30 18:43:41 2010 New Revision: 77503 Added: pypy/branch/jit-str-fix/pypy/jit/backend/conftest.py - copied, changed from r77494, pypy/branch/jit-str-fix/pypy/jit/backend/test/conftest.py Removed: pypy/branch/jit-str-fix/pypy/jit/backend/test/conftest.py Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py pypy/branch/jit-str-fix/pypy/jit/backend/test/test_random.py pypy/branch/jit-str-fix/pypy/jit/backend/x86/regalloc.py pypy/branch/jit-str-fix/pypy/jit/backend/x86/runner.py Log: More fixes. It's a complete MESS to handle efficiently operations with 5 arguments with that regalloc interface. Copied: pypy/branch/jit-str-fix/pypy/jit/backend/conftest.py (from r77494, pypy/branch/jit-str-fix/pypy/jit/backend/test/conftest.py) ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/test/conftest.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/conftest.py Thu Sep 30 18:43:41 2010 @@ -1,3 +1,7 @@ +""" +This conftest adds options used by test/test_random and +x86/test/test_zll_random. 
+""" import py, random option = py.test.config.option @@ -29,4 +33,3 @@ group.addoption('--output', '-O', action="store", type="str", default="", dest="output", help="dump output to a file") - Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/test/test_ll_random.py Thu Sep 30 18:43:41 2010 @@ -390,6 +390,8 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) + if v_srcstring.value == v_dststring.value: # because it's not a + raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(v_srcstring.getref(self.ptr).chars) dstlen = len(v_dststring.getref(self.ptr).chars) v_length = builder.get_index(min(srclen, dstlen), r) Modified: pypy/branch/jit-str-fix/pypy/jit/backend/test/test_random.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/test/test_random.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/test/test_random.py Thu Sep 30 18:43:41 2010 @@ -1,7 +1,7 @@ import py, sys from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory -from pypy.jit.backend.test import conftest as demo_conftest +from pypy.jit.backend import conftest as demo_conftest from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken from pypy.jit.metainterp.history import BoxPtr, ConstPtr @@ -102,7 +102,7 @@ elif isinstance(v, ConstFloat): args.append('ConstFloat(%r)' % v.value) elif isinstance(v, ConstInt): - args.append('ConstInt(%d)' % v.value) + args.append('ConstInt(%s)' % v.value) else: raise NotImplementedError(v) if op.getdescr() is None: @@ -113,7 +113,7 @@ except AttributeError: descrstr = ', descr=...' 
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % ( - opname[op.opnum], ', '.join(args), names[op.result], descrstr) + opname[op.getopnum()], ', '.join(args), names[op.result], descrstr) #if getattr(op, 'suboperations', None) is not None: # subops.append(op) @@ -189,7 +189,7 @@ v.value) print >>s, ' op = cpu.execute_token(looptoken)' if self.should_fail_by is None: - fail_args = self.loop.operations[-1].args + fail_args = self.loop.operations[-1].getarglist() else: fail_args = self.should_fail_by.getfailargs() for i, v in enumerate(fail_args): Modified: pypy/branch/jit-str-fix/pypy/jit/backend/x86/regalloc.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/x86/regalloc.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/x86/regalloc.py Thu Sep 30 18:43:41 2010 @@ -959,18 +959,23 @@ args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(args[0], args) ofs_loc = self.rm.make_sure_var_in_reg(args[2], args) + assert args[0] is not args[1] # forbidden case of aliasing self.rm.possibly_free_var(args[0]) - self.rm.possibly_free_var(args[2]) + if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free + self.rm.possibly_free_var(args[2]) # it if ==args[3] or args[4] srcaddr_box = TempBox() - srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box) + forbidden_vars = [args[1], args[3], args[4], srcaddr_box] + srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box, forbidden_vars) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc) # compute the destination address - base_loc = self.rm.make_sure_var_in_reg(args[1], args) - ofs_loc = self.rm.make_sure_var_in_reg(args[3], args) + base_loc = self.rm.make_sure_var_in_reg(args[1], forbidden_vars) + ofs_loc = self.rm.make_sure_var_in_reg(args[3], forbidden_vars) self.rm.possibly_free_var(args[1]) - self.rm.possibly_free_var(args[3]) + if args[3] is not args[4]: # more of the MESS described above + self.rm.possibly_free_var(args[3]) + forbidden_vars = [args[4], srcaddr_box] dstaddr_box = TempBox() - dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box) + dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, forbidden_vars) self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc) # call memcpy() length_loc = self.loc(args[4]) Modified: pypy/branch/jit-str-fix/pypy/jit/backend/x86/runner.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/backend/x86/runner.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/backend/x86/runner.py Thu Sep 30 18:43:41 2010 @@ -87,7 +87,9 @@ def execute_token(self, executable_token): addr = executable_token._x86_bootstrap_code + #llop.debug_print(lltype.Void, ">>>> Entering", addr) func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) + #llop.debug_print(lltype.Void, "<<<< Back") fail_index = self._execute_call(func) return self.get_fail_descr_from_number(fail_index) @@ -99,10 +101,7 @@ LLInterpreter.current_interpreter = self.debug_ll_interpreter res = 0 try: - #llop.debug_print(lltype.Void, ">>>> Entering", - # rffi.cast(lltype.Signed, func)) res = func() - #llop.debug_print(lltype.Void, "<<<< Back") finally: if not self.translate_support_code: LLInterpreter.current_interpreter = prev_interpreter From arigo at codespeak.net Thu Sep 30 18:55:20 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 18:55:20 +0200 (CEST) Subject: [pypy-svn] r77504 - pypy/branch/jit-str-fix/pypy/jit/tl Message-ID: 
<20100930165520.81A1E282BDB@codespeak.net> Author: arigo Date: Thu Sep 30 18:55:19 2010 New Revision: 77504 Modified: pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py Log: Use a more reasonable default. Modified: pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py ============================================================================== --- pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py (original) +++ pypy/branch/jit-str-fix/pypy/jit/tl/jittest.py Thu Sep 30 18:55:19 2010 @@ -12,7 +12,7 @@ from pypy.rlib.jit import OPTIMIZER_FULL -ARGS = ["jittest", "10000"] +ARGS = ["jittest", "100"] def jittest(driver): From hakanardo at codespeak.net Thu Sep 30 19:07:54 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 30 Sep 2010 19:07:54 +0200 (CEST) Subject: [pypy-svn] r77505 - pypy/branch/jit-unroll-loops Message-ID: <20100930170754.8D285282BDB@codespeak.net> Author: hakanardo Date: Thu Sep 30 19:07:52 2010 New Revision: 77505 Added: pypy/branch/jit-unroll-loops/ (props changed) - copied from r77504, pypy/trunk/ Log: Let's kill perfect spec, unroll loops into two itterations and make bridges call the entry bridge From arigo at codespeak.net Thu Sep 30 19:15:37 2010 From: arigo at codespeak.net (arigo at codespeak.net) Date: Thu, 30 Sep 2010 19:15:37 +0200 (CEST) Subject: [pypy-svn] r77506 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100930171537.DCFA6282BDB@codespeak.net> Author: arigo Date: Thu Sep 30 19:15:36 2010 New Revision: 77506 Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py Log: Add an assert. Used to catch cases where we accidentally pass the docstring here. Modified: pypy/branch/fast-forward/pypy/objspace/std/model.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/model.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/model.py Thu Sep 30 19:15:36 2010 @@ -368,6 +368,7 @@ self.operatorsymbol = operatorsymbol if specialnames is None: specialnames = [operatorsymbol] + assert isinstance(specialnames, list) self.specialnames = specialnames # e.g. ['__xxx__', '__rxxx__'] self.extras = extras # transform '+' => 'add' etc. From agaynor at codespeak.net Thu Sep 30 19:40:48 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 19:40:48 +0200 (CEST) Subject: [pypy-svn] r77507 - in pypy/branch/fast-forward/pypy/objspace/std: . test Message-ID: <20100930174048.DADF6282BDB@codespeak.net> Author: agaynor Date: Thu Sep 30 19:40:47 2010 New Revision: 77507 Modified: pypy/branch/fast-forward/pypy/objspace/std/longtype.py pypy/branch/fast-forward/pypy/objspace/std/register_all.py pypy/branch/fast-forward/pypy/objspace/std/test/test_longobject.py Log: long tests for abstract_numbers now pass. 
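At application level, this and the neighbouring fast-forward commits aim at CPython 2.7's numbers protocol on the built-in types, roughly as follows (CPython 2.7 behaviour shown; the "abstract_numbers" tests mentioned in the log exercise this kind of thing):

    assert (7L).real == 7L and (7L).imag == 0L
    assert (7L).numerator == 7L and (7L).denominator == 1L
    assert (2.5).conjugate() == 2.5 and (7).conjugate() == 7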
Modified: pypy/branch/fast-forward/pypy/objspace/std/longtype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/longtype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/longtype.py Thu Sep 30 19:40:47 2010 @@ -1,8 +1,17 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import gateway, typedef -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.strutil import string_to_w_long, ParseStringError +long_conjugate = SMM("conjugate", 1, doc="Returns self, the complex conjugate of any long.") + +def long_conjugate__ANY(space, w_int): + return space.pos(w_int) + +register_all(vars(), globals()) + + def descr__new__(space, w_longtype, w_x=0, w_base=gateway.NoneNotWrapped): from pypy.objspace.std.longobject import W_LongObject w_value = w_x # 'x' is the keyword argument name in CPython @@ -71,6 +80,12 @@ def descr_get_denominator(space, w_obj): return space.newlong(1) +def descr_get_real(space, w_obj): + return w_obj + +def descr_get_imag(space, w_obj): + return space.newlong(0) + # ____________________________________________________________ long_typedef = StdTypeDef("long", @@ -84,4 +99,7 @@ __new__ = gateway.interp2app(descr__new__), numerator = typedef.GetSetProperty(descr_get_numerator), denominator = typedef.GetSetProperty(descr_get_denominator), - ) + real = typedef.GetSetProperty(descr_get_real), + imag = typedef.GetSetProperty(descr_get_imag), +) +long_typedef.registermethods(globals()) Modified: pypy/branch/fast-forward/pypy/objspace/std/register_all.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/register_all.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/register_all.py Thu Sep 30 19:40:47 2010 @@ -21,7 +21,7 @@ if name.find('__')<1 or name.startswith('app_'): continue funcname, sig = name.split('__') - l=[] + l = [] for i in sig.split('_'): if i == 'ANY': # just in case W_ANY is not in module_dict icls = model.W_ANY Modified: pypy/branch/fast-forward/pypy/objspace/std/test/test_longobject.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/test/test_longobject.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/test/test_longobject.py Thu Sep 30 19:40:47 2010 @@ -254,3 +254,12 @@ class myotherlong(long): pass assert long(myotherlong(21)) == 21L + + def test_conjugate(self): + assert (7L).conjugate() == 7L + assert (-7L).conjugate() == -7L + + class L(long): + pass + + assert type(L(7).conjugate()) is long From agaynor at codespeak.net Thu Sep 30 20:19:33 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 20:19:33 +0200 (CEST) Subject: [pypy-svn] r77508 - pypy/branch/fast-forward/pypy/objspace/std Message-ID: <20100930181933.42981282BDB@codespeak.net> Author: agaynor Date: Thu Sep 30 20:19:31 2010 New Revision: 77508 Modified: pypy/branch/fast-forward/pypy/objspace/std/floattype.py pypy/branch/fast-forward/pypy/objspace/std/inttype.py Log: Added int.conjugate and float.conjugate Modified: pypy/branch/fast-forward/pypy/objspace/std/floattype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/floattype.py (original) +++ 
pypy/branch/fast-forward/pypy/objspace/std/floattype.py Thu Sep 30 20:19:31 2010 @@ -5,6 +5,7 @@ from pypy.interpreter import gateway, typedef from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError +from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.strutil import ParseStringError from pypy.objspace.std.strutil import interp_string_to_float @@ -13,6 +14,13 @@ float_as_integer_ratio = SMM("as_integer_ratio", 1) float_hex = SMM("hex", 1) +float_conjugate = SMM("conjugate", 1, doc="Returns self, the complex conjugate of any float.") + +def float_conjugate__ANY(space, w_float): + return space.pos(w_float) + +register_all(vars(), globals()) + def descr__new__(space, w_floattype, w_x=0.0): from pypy.objspace.std.floatobject import W_FloatObject Modified: pypy/branch/fast-forward/pypy/objspace/std/inttype.py ============================================================================== --- pypy/branch/fast-forward/pypy/objspace/std/inttype.py (original) +++ pypy/branch/fast-forward/pypy/objspace/std/inttype.py Thu Sep 30 20:19:31 2010 @@ -1,6 +1,7 @@ from pypy.interpreter import gateway, typedef from pypy.interpreter.error import OperationError -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.strutil import (string_to_int, string_to_w_long, ParseStringError, ParseStringOverflowError) @@ -9,6 +10,13 @@ # ____________________________________________________________ +int_conjugate = SMM("conjugate", 1, doc="Returns self, the complex conjugate of any int.") + +def int_conjugate__ANY(space, w_int): + return space.pos(w_int) + +register_all(vars(), globals()) + def wrapint(space, x): if space.config.objspace.std.withsmallint: @@ -163,3 +171,4 @@ real = typedef.GetSetProperty(descr_get_real), imag = typedef.GetSetProperty(descr_get_imag), ) +int_typedef.registermethods(globals()) From agaynor at codespeak.net Thu Sep 30 20:20:10 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 20:20:10 +0200 (CEST) Subject: [pypy-svn] r77509 - pypy/branch/fast-forward/lib-python Message-ID: <20100930182010.59136282BDB@codespeak.net> Author: agaynor Date: Thu Sep 30 20:20:08 2010 New Revision: 77509 Modified: pypy/branch/fast-forward/lib-python/TODO Log: Fixed Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Thu Sep 30 20:20:08 2010 @@ -16,8 +16,6 @@ assert eval('a', None, dict(a=42)) == 42 -- Missing (float|int).(imag|real) - - Missing complex.__trunc__ - Mark some tests as "implementation specific":: From agaynor at codespeak.net Thu Sep 30 20:56:59 2010 From: agaynor at codespeak.net (agaynor at codespeak.net) Date: Thu, 30 Sep 2010 20:56:59 +0200 (CEST) Subject: [pypy-svn] r77510 - pypy/branch/fast-forward/lib-python Message-ID: <20100930185659.DFF76282BDB@codespeak.net> Author: agaynor Date: Thu Sep 30 20:56:58 2010 New Revision: 77510 Modified: pypy/branch/fast-forward/lib-python/TODO Log: Added a note about the buildbot. 
Modified: pypy/branch/fast-forward/lib-python/TODO ============================================================================== --- pypy/branch/fast-forward/lib-python/TODO (original) +++ pypy/branch/fast-forward/lib-python/TODO Thu Sep 30 20:56:58 2010 @@ -1,6 +1,10 @@ TODO list for 2.7.0 =================== +You can find the results of the most recent buildbot run at: +http://buildbot.pypy.org/summary?branch=branch/fast-forward + + Probably easy tasks ------------------- From hakanardo at codespeak.net Thu Sep 30 21:55:23 2010 From: hakanardo at codespeak.net (hakanardo at codespeak.net) Date: Thu, 30 Sep 2010 21:55:23 +0200 (CEST) Subject: [pypy-svn] r77511 - in pypy/branch/jit-unroll-loops/pypy/jit/metainterp: . optimizeopt test Message-ID: <20100930195523.1DFAC282BDB@codespeak.net> Author: hakanardo Date: Thu Sep 30 21:55:22 2010 New Revision: 77511 Added: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/unroll.py Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/compile.py pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/__init__.py pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/optimizer.py pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/virtualize.py pypy/branch/jit-unroll-loops/pypy/jit/metainterp/test/test_basic.py Log: Unrolls loop traces to create entry bridge and loop from the same trace allowing the loop to inherit the optimizer state at the end of the entry bridge and thus the loop invariants are removed from the loop. Virtuals not supported yet. Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/compile.py ============================================================================== --- pypy/branch/jit-unroll-loops/pypy/jit/metainterp/compile.py (original) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/compile.py Thu Sep 30 21:55:22 2010 @@ -66,6 +66,12 @@ loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP + + loop.preamble = create_empty_loop(metainterp) + loop.preamble.greenkey = greenkey + loop.preamble.inputargs = loop.inputargs + loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) + try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) @@ -74,9 +80,18 @@ if old_loop_token is not None: metainterp.staticdata.log("reusing old loop") return old_loop_token - send_loop_to_backend(metainterp_sd, loop, "loop") - insert_loop_token(old_loop_tokens, loop_token) - return loop_token + + if loop.preamble.operations: + loop.token.specnodes = [prebuiltNotSpecNode] * len(loop.inputargs) # FIXME + loop.preamble.token.specnodes = [prebuiltNotSpecNode] * len(loop.preamble.inputargs) # FIXME + send_loop_to_backend(metainterp_sd, loop, "loop") + send_loop_to_backend(metainterp_sd, loop.preamble, "loop") + insert_loop_token(old_loop_tokens, loop.preamble.token) + return loop.preamble.token + else: + send_loop_to_backend(metainterp_sd, loop, "loop") + insert_loop_token(old_loop_tokens, loop_token) + return loop_token def insert_loop_token(old_loop_tokens, loop_token): # Find where in old_loop_tokens we should insert this new loop_token. 
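[Editorial note, not part of the commit: to make the idea in the log message concrete, here is a small self-contained sketch of trace peeling. The function name peel_trace and the string-based trace representation are made up for illustration and are not PyPy code; the point is only that after peeling, the second copy of the invariant int_mul gets exactly the same arguments as the first, so the optimizer that keeps its state from the preamble can drop it from the residual loop.]

    def peel_trace(operations, inputargs, jumpargs):
        """Duplicate a trace once: the second copy starts from the values
        the first copy jumps back with (preamble -> peeled loop)."""
        argmap = dict(zip(inputargs, jumpargs))
        peeled = []
        for opname, args, result in operations:
            # rename arguments produced by the first iteration
            newargs = [argmap.get(a, a) for a in args]
            newresult = result + "'" if result else None
            if result:
                argmap[result] = newresult
            peeled.append((opname, newargs, newresult))
        return peeled

    # Toy trace for "res += x * x; y -= 1", where x never changes:
    trace = [
        ("int_mul", ["x0", "x0"], "v0"),
        ("int_add", ["res0", "v0"], "res1"),
        ("int_sub", ["y0", "1"], "y1"),
    ]
    second = peel_trace(trace, ["x0", "res0", "y0"], ["x0", "res1", "y1"])
    # second[0] is ("int_mul", ["x0", "x0"], "v0'"): same inputs as in the
    # preamble, so the loop proper can reuse v0 instead of recomputing it.
    print second
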
Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/__init__.py ============================================================================== --- pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/__init__.py (original) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 30 21:55:22 2010 @@ -4,6 +4,7 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.string import OptString +from pypy.jit.metainterp.optimizeopt.unroll import OptUnroll def optimize_loop_1(metainterp_sd, loop, virtuals=True): """Optimize loop.operations to make it match the input of loop.specnodes @@ -11,7 +12,8 @@ must be applicable to the loop; you will probably get an AssertionError if not. """ - optimizations = [OptIntBounds(), + optimizations = [OptUnroll(), + OptIntBounds(), OptRewrite(), OptVirtualize(), # OptString(), Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/optimizer.py ============================================================================== --- pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/optimizer.py (original) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 30 21:55:22 2010 @@ -25,12 +25,14 @@ class OptValue(object): __metaclass__ = extendabletype - _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') + _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', + 'fromstart') last_guard_index = -1 level = LEVEL_UNKNOWN known_class = None intbound = None + fromstart = False def __init__(self, box): self.box = box Added: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/unroll.py ============================================================================== --- (empty file) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/unroll.py Thu Sep 30 21:55:22 2010 @@ -0,0 +1,86 @@ +from pypy.jit.metainterp.optimizeopt.optimizer import * +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.compile import ResumeGuardDescr + +class OptUnroll(Optimization): + """Unroll the loop into two itterations. 
The first one will + become the preamble or entry bridge (don't think there is a + distinction anymore)""" + + def setup(self, virtuals): + self.enabled = virtuals + + def propagate_forward(self, op): + if not self.enabled: + self.emit_operation(op) + return + + if op.getopnum() == rop.JUMP: + loop = self.optimizer.loop + loop.preamble.operations = self.optimizer.newoperations + self.optimizer.newoperations = [] + jump_args = op.getarglist() + inputargs = self.inline(loop.preamble.operations + [op], + loop.inputargs, jump_args) + print "IN: ", inputargs + loop.inputargs = inputargs + jmp = ResOperation(rop.JUMP, loop.inputargs[:], None) + jmp.setdescr(loop.token) + loop.preamble.operations.append(jmp) + else: + self.emit_operation(op) + + def inline(self, loop_operations, loop_args, jump_args): + argmap = {} + assert len(loop_args) == len(jump_args) + for i in range(len(loop_args)): + argmap[loop_args[i]] = jump_args[i] + + print + print + print argmap + + for v in self.optimizer.values.values(): + v.fromstart = True + + inputargs = jump_args[:] + for op in loop_operations: + print "I:", op + newop = op.clone() + for i in range(op.numargs()): + a = op.getarg(i) + if not isinstance(a, Const): + newa = argmap[a] + newop.setarg(i, newa) + if op.result: + newop.result = op.result.clonebox() + argmap[op.result] = newop.result + descr = newop.getdescr() + if isinstance(descr, ResumeGuardDescr): + descr.rd_numb = None + + if newop.getopnum() == rop.JUMP: + args = newop.getarglist() + newop.initarglist(args + inputargs[len(args):]) + # FIXME: Assumes no virtuals + print "N:", newop + + current = len(self.optimizer.newoperations) + self.emit_operation(newop) + for op in self.optimizer.newoperations[current:]: + print "E:", op + for a in op.getarglist(): + if not isinstance(a, Const) and a in self.optimizer.values: + v = self.getvalue(a) + print " testing ", a + if v.fromstart and a not in inputargs: + print " ", a + inputargs.append(a) + + return inputargs + + + + + + Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/virtualize.py ============================================================================== --- pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/virtualize.py (original) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 30 21:55:22 2010 @@ -283,6 +283,8 @@ return vvalue def optimize_JUMP(self, op): + self.emit_operation(op) # FIXME + return orgop = self.optimizer.loop.operations[-1] exitargs = [] target_loop_token = orgop.getdescr() Modified: pypy/branch/jit-unroll-loops/pypy/jit/metainterp/test/test_basic.py ============================================================================== --- pypy/branch/jit-unroll-loops/pypy/jit/metainterp/test/test_basic.py (original) +++ pypy/branch/jit-unroll-loops/pypy/jit/metainterp/test/test_basic.py Thu Sep 30 21:55:22 2010 @@ -303,6 +303,124 @@ found += 1 assert found == 1 + def test_loop_invariant_mul1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 252 + self.check_loop_count(2) + self.check_loops({'guard_true': 1, + 'int_add': 1, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, + 'jump': 2}) + + def test_loop_invariant_mul_ovf(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 
0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + b = y * 2 + res += ovfcheck(x * x) + b + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 308 + self.check_loop_count(1) + self.check_loops({'guard_true': 1, 'guard_no_overflow': 1, + 'int_add': 2, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, 'int_mul_ovf': 1, + 'jump': 1}) + + def test_loop_invariant_mul_bridge1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + if y<8: + x += 1 + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 919 + self.check_loop_count(3) + + def test_loop_invariant_mul_bridge_maintaining1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + if y<8: + res += 1 + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 583 + self.check_loop_count(3) + self.check_loops({'int_lt': 1, 'int_gt': 1, + 'guard_false': 1, 'guard_true': 1, + 'int_sub': 2, 'int_mul': 2, 'int_add': 2, + 'jump': 3}) + + def test_loop_invariant_mul_bridge_maintaining2(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + z = x * x + res += z + if y<8: + res += z + y -= 1 + return res + res = self.meta_interp(f, [6, 16]) + assert res == 828 + self.check_loop_count(3) + self.check_loops({'int_lt': 1, 'int_gt': 1, + 'guard_false': 1, 'guard_true': 1, + 'int_sub': 2, 'int_mul': 2, 'int_add': 2, + 'jump': 3}) + + def test_loop_invariant_intbox(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + class I: + __slots__ = 'intval' + _immutable_ = True + def __init__(self, intval): + self.intval = intval + def f(i, y): + res = 0 + x = I(i) + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x.intval * x.intval + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 252 + self.check_loop_count(2) + self.check_loops({'guard_true': 1, + 'int_add': 1, 'int_sub': 1, 'int_gt': 1, + 'int_mul': 1, 'getfield_gc_pure': 1, + 'jump': 2}) + def test_loops_are_transient(self): import gc, weakref myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])